code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _snake_case ( nn.Module ):
def __init__( self : Optional[Any] ):
super().__init__()
SCREAMING_SNAKE_CASE:Tuple = nn.Linear(3 ,4 )
SCREAMING_SNAKE_CASE:List[Any] = nn.BatchNormad(4 )
SCREAMING_SNAKE_CASE:List[Any] = nn.Linear(4 ,5 )
def __UpperCamelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Optional[Any] ):
return self.lineara(self.batchnorm(self.lineara(SCREAMING_SNAKE_CASE__ ) ) )
class _snake_case ( _a ):
def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,*SCREAMING_SNAKE_CASE__ : str ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ):
return (args[0] + 1,) + args[1:], kwargs
class _snake_case ( _a ):
def __UpperCamelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict ):
return output + 1
class _snake_case ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:str = ModelForTest()
SCREAMING_SNAKE_CASE:Optional[int] = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
self.assertEqual(test_model._hf_hook ,SCREAMING_SNAKE_CASE__ )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,"_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ ,"forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) ,["x"] )
remove_hook_from_module(SCREAMING_SNAKE_CASE__ )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ ,"_hf_hook" ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ ,"_old_forward" ) )
def __UpperCamelCase ( self : int ):
SCREAMING_SNAKE_CASE:Dict = ModelForTest()
SCREAMING_SNAKE_CASE:Optional[Any] = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
add_hook_to_module(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,append=SCREAMING_SNAKE_CASE__ )
self.assertEqual(isinstance(test_model._hf_hook ,SCREAMING_SNAKE_CASE__ ) ,SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(test_model._hf_hook.hooks ) ,2 )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,"_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ ,"forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) ,["x"] )
remove_hook_from_module(SCREAMING_SNAKE_CASE__ )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ ,"_hf_hook" ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ ,"_old_forward" ) )
def __UpperCamelCase ( self : Dict ):
SCREAMING_SNAKE_CASE:List[str] = ModelForTest()
SCREAMING_SNAKE_CASE:Tuple = torch.randn(2 ,3 )
SCREAMING_SNAKE_CASE:Tuple = test_model(x + 1 )
SCREAMING_SNAKE_CASE:List[str] = test_model(x + 2 )
SCREAMING_SNAKE_CASE:List[str] = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:int = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
SCREAMING_SNAKE_CASE:Any = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[Any] = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
SCREAMING_SNAKE_CASE:Optional[int] = SequentialHook(PreForwardHook() ,PreForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = test_model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1e-5 )
def __UpperCamelCase ( self : Dict ):
SCREAMING_SNAKE_CASE:int = ModelForTest()
SCREAMING_SNAKE_CASE:List[Any] = torch.randn(2 ,3 )
SCREAMING_SNAKE_CASE:Optional[Any] = test_model(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[str] = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ ,output + 1 ,atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
SCREAMING_SNAKE_CASE:Optional[int] = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[Any] = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ ,output + 1 ,atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
SCREAMING_SNAKE_CASE:List[Any] = SequentialHook(PostForwardHook() ,PostForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = test_model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(SCREAMING_SNAKE_CASE__ ,output + 2 ,atol=1e-5 )
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:Any = ModelForTest()
SCREAMING_SNAKE_CASE:List[str] = torch.randn(2 ,3 )
SCREAMING_SNAKE_CASE:Optional[int] = test_model(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ ,output + 1 ) )
self.assertTrue(outputa.requires_grad )
SCREAMING_SNAKE_CASE:Tuple = True
SCREAMING_SNAKE_CASE:List[str] = test_model(SCREAMING_SNAKE_CASE__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def __UpperCamelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE:str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara ,AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm ,AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara ,AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device ,torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device(0 ) )
self.assertEqual(model.lineara.weight.device ,torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
SCREAMING_SNAKE_CASE:List[str] = torch.randn(2 ,3 )
SCREAMING_SNAKE_CASE:str = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device ,torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(SCREAMING_SNAKE_CASE__ ,AlignDevicesHook(io_same_device=SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE:str = torch.randn(2 ,3 ).to(0 )
SCREAMING_SNAKE_CASE:Optional[int] = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device ,torch.device(0 ) )
def __UpperCamelCase ( self : int ):
SCREAMING_SNAKE_CASE:Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
# This will move each submodule on different devices
SCREAMING_SNAKE_CASE:str = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.lineara ,AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.batchnorm ,AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.lineara ,AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device ,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
SCREAMING_SNAKE_CASE:List[Any] = torch.device(hook_kwargs["execution_device"] )
self.assertEqual(model.batchnorm.running_mean.device ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = torch.randn(2 ,3 )
SCREAMING_SNAKE_CASE:Optional[Any] = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device ,SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
# Now test with buffers included in the offload
SCREAMING_SNAKE_CASE:Dict = {
"execution_device": 0 if torch.cuda.is_available() else "cpu",
"offload": True,
"offload_buffers": True,
}
add_hook_to_module(model.lineara ,AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.batchnorm ,AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.lineara ,AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device ,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device("meta" ) )
SCREAMING_SNAKE_CASE:Union[str, Any] = torch.randn(2 ,3 )
SCREAMING_SNAKE_CASE:str = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device ,SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
def __UpperCamelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE:int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
# This will move each submodule on different devices
SCREAMING_SNAKE_CASE:Optional[Any] = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(SCREAMING_SNAKE_CASE__ ,execution_device=SCREAMING_SNAKE_CASE__ ,offload=SCREAMING_SNAKE_CASE__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device ,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
SCREAMING_SNAKE_CASE:Tuple = torch.device(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.batchnorm.running_mean.device ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[str] = torch.randn(2 ,3 )
SCREAMING_SNAKE_CASE:Any = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device ,SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(SCREAMING_SNAKE_CASE__ ,execution_device=SCREAMING_SNAKE_CASE__ ,offload=SCREAMING_SNAKE_CASE__ ,offload_buffers=SCREAMING_SNAKE_CASE__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device ,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device("meta" ) )
SCREAMING_SNAKE_CASE:List[Any] = torch.randn(2 ,3 )
SCREAMING_SNAKE_CASE:Any = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device ,SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
def __UpperCamelCase ( self : str ):
SCREAMING_SNAKE_CASE:str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
# This will move each submodule on different devices
SCREAMING_SNAKE_CASE:List[Any] = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(
SCREAMING_SNAKE_CASE__ ,execution_device=SCREAMING_SNAKE_CASE__ ,offload=SCREAMING_SNAKE_CASE__ ,weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device ,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
SCREAMING_SNAKE_CASE:Dict = torch.device(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.batchnorm.running_mean.device ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[int] = torch.randn(2 ,3 )
SCREAMING_SNAKE_CASE:Optional[int] = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device ,SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
SCREAMING_SNAKE_CASE__ ,execution_device=SCREAMING_SNAKE_CASE__ ,offload=SCREAMING_SNAKE_CASE__ ,weights_map=model.state_dict() ,offload_buffers=SCREAMING_SNAKE_CASE__ ,)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device ,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device("meta" ) )
SCREAMING_SNAKE_CASE:List[str] = torch.randn(2 ,3 )
SCREAMING_SNAKE_CASE:List[str] = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device ,SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("cpu" ) )
| 143 |
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
a_ : Optional[int] = HfArgumentParser(InitializationArguments)
a_ : str = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
a_ : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
a_ : str = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
a_ : Optional[Any] = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
a_ : Optional[Any] = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 675 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : int =42
a : str =42
a : Dict =None
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
a : Tuple =2
@register_to_config
def __init__( self,__SCREAMING_SNAKE_CASE = 0.02,__SCREAMING_SNAKE_CASE = 1_00,__SCREAMING_SNAKE_CASE = 1.007,__SCREAMING_SNAKE_CASE = 80,__SCREAMING_SNAKE_CASE = 0.05,__SCREAMING_SNAKE_CASE = 50,):
'''simple docstring'''
__lowerCAmelCase = sigma_max
# setable values
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None # sigma(t_i)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
return sample
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = num_inference_steps
__lowerCAmelCase = np.arange(0,self.num_inference_steps )[::-1].copy()
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
__lowerCAmelCase = torch.tensor(__SCREAMING_SNAKE_CASE,dtype=torch.floataa,device=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
__lowerCAmelCase = min(self.config.s_churn / self.num_inference_steps,2**0.5 - 1 )
else:
__lowerCAmelCase = 0
# sample eps ~ N(0, S_noise^2 * I)
__lowerCAmelCase = self.config.s_noise * randn_tensor(sample.shape,generator=__SCREAMING_SNAKE_CASE ).to(sample.device )
__lowerCAmelCase = sigma + gamma * sigma
__lowerCAmelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = True,):
'''simple docstring'''
__lowerCAmelCase = sample_hat + sigma_hat * model_output
__lowerCAmelCase = (sample_hat - pred_original_sample) / sigma_hat
__lowerCAmelCase = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__SCREAMING_SNAKE_CASE,derivative=__SCREAMING_SNAKE_CASE,pred_original_sample=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = True,):
'''simple docstring'''
__lowerCAmelCase = sample_prev + sigma_prev * model_output
__lowerCAmelCase = (sample_prev - pred_original_sample) / sigma_prev
__lowerCAmelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__SCREAMING_SNAKE_CASE,derivative=__SCREAMING_SNAKE_CASE,pred_original_sample=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
raise NotImplementedError()
| 689 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
a_ : List[Any] = get_logger(__name__)
class snake_case :
"""simple docstring"""
_lowerCamelCase = "dummy_data"
_lowerCamelCase = "datasets"
_lowerCamelCase = False
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = True , UpperCamelCase = None , ):
"""simple docstring"""
lowerCamelCase_ = 0
lowerCamelCase_ = dataset_name
lowerCamelCase_ = cache_dir
lowerCamelCase_ = use_local_dummy_data
lowerCamelCase_ = config
# download_callbacks take a single url as input
lowerCamelCase_ = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowerCamelCase_ = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowerCamelCase_ = str(UpperCamelCase )
# to be downloaded
lowerCamelCase_ = None
lowerCamelCase_ = None
@property
def snake_case ( self ):
"""simple docstring"""
if self._dummy_file is None:
lowerCamelCase_ = self.download_dummy_data()
return self._dummy_file
@property
def snake_case ( self ):
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def snake_case ( self ):
"""simple docstring"""
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowerCamelCase_ = cached_path(
UpperCamelCase , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase , force_extract=UpperCamelCase )
return os.path.join(UpperCamelCase , self.dummy_file_name )
@property
def snake_case ( self ):
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def snake_case ( self ):
"""simple docstring"""
if self._bucket_url is None:
lowerCamelCase_ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def snake_case ( self ):
"""simple docstring"""
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def snake_case ( self , UpperCamelCase , *UpperCamelCase ):
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowerCamelCase_ = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowerCamelCase_ = self.dummy_file_name
# special case when data_url is a dict
if isinstance(UpperCamelCase , UpperCamelCase ):
return self.create_dummy_data_dict(UpperCamelCase , UpperCamelCase )
elif isinstance(UpperCamelCase , (list, tuple) ):
return self.create_dummy_data_list(UpperCamelCase , UpperCamelCase )
else:
return self.create_dummy_data_single(UpperCamelCase , UpperCamelCase )
def snake_case ( self , UpperCamelCase , *UpperCamelCase ):
"""simple docstring"""
return self.download_and_extract(UpperCamelCase )
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
return self.download_and_extract(UpperCamelCase )
def snake_case ( self , UpperCamelCase , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return path
def snake_case ( self ):
"""simple docstring"""
return {}
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(UpperCamelCase , UpperCamelCase ):
for single_url in single_urls:
download_callback(UpperCamelCase )
else:
lowerCamelCase_ = single_urls
download_callback(UpperCamelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = [os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) ) for x in single_urls]
else:
lowerCamelCase_ = single_urls
lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) )
lowerCamelCase_ = value
# make sure that values are unique
if all(isinstance(UpperCamelCase , UpperCamelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowerCamelCase_ = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowerCamelCase_ = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , UpperCamelCase ) ) for url in data_url )
lowerCamelCase_ = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowerCamelCase_ = [data_url[0]] * len(UpperCamelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(UpperCamelCase )
return dummy_data_list
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(UpperCamelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
def _iter_archive_members(UpperCamelCase ):
# this preserves the order of the members inside the ZIP archive
lowerCamelCase_ = Path(self.dummy_file ).parent
lowerCamelCase_ = path.relative_to(UpperCamelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowerCamelCase_ = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(UpperCamelCase )
lowerCamelCase_ = Path(UpperCamelCase )
lowerCamelCase_ = _iter_archive_members(UpperCamelCase ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(UpperCamelCase ).as_posix(), file_path.open("rb" )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if not isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = [paths]
for path in paths:
if os.path.isfile(UpperCamelCase ):
if os.path.basename(UpperCamelCase ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(UpperCamelCase ):
if os.path.basename(UpperCamelCase ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(UpperCamelCase ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(UpperCamelCase , UpperCamelCase )
| 675 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class _UpperCamelCase( __lowerCamelCase ):
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use DeformableDetrImageProcessor instead.' , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 47 |
'''simple docstring'''
import os
def __snake_case ( UpperCAmelCase_ : str = "matrix.txt" ):
with open(os.path.join(os.path.dirname(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) as in_file:
lowerCamelCase_ = in_file.read()
lowerCamelCase_ = [[int(UpperCAmelCase_ ) for cell in row.split("," )] for row in data.strip().splitlines()]
lowerCamelCase_ = [[0 for cell in row] for row in grid]
lowerCamelCase_ = len(grid[0] )
lowerCamelCase_ = [[0 for i in range(UpperCAmelCase_ )] for j in range(UpperCAmelCase_ )]
lowerCamelCase_ = grid[0][0]
for i in range(1 , UpperCAmelCase_ ):
lowerCamelCase_ = grid[0][i] + dp[0][i - 1]
for i in range(1 , UpperCAmelCase_ ):
lowerCamelCase_ = grid[i][0] + dp[i - 1][0]
for i in range(1 , UpperCAmelCase_ ):
for j in range(1 , UpperCAmelCase_ ):
lowerCamelCase_ = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 675 | 0 |
from string import ascii_uppercase
_UpperCamelCase = {char: i for i, char in enumerate(ascii_uppercase)}
_UpperCamelCase = dict(enumerate(ascii_uppercase))
def _lowercase ( lowercase__ , lowercase__ ):
__lowerCAmelCase : str = len(UpperCAmelCase_ )
__lowerCAmelCase : Union[str, Any] = 0
while True:
if x == i:
__lowerCAmelCase : Any = 0
if len(UpperCAmelCase_ ) == len(UpperCAmelCase_ ):
break
key += key[i]
i += 1
return key
def _lowercase ( lowercase__ , lowercase__ ):
__lowerCAmelCase : Optional[int] = ''''''
__lowerCAmelCase : int = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__lowerCAmelCase : Optional[int] = (dicta[letter] - dicta[key_new[i]]) % 2_6
i += 1
cipher_text += dicta[x]
return cipher_text
def _lowercase ( lowercase__ , lowercase__ ):
__lowerCAmelCase : Any = ''''''
__lowerCAmelCase : Tuple = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__lowerCAmelCase : int = (dicta[letter] + dicta[key_new[i]] + 2_6) % 2_6
i += 1
or_txt += dicta[x]
return or_txt
def _lowercase ( ):
__lowerCAmelCase : List[Any] = '''THE GERMAN ATTACK'''
__lowerCAmelCase : Optional[Any] = '''SECRET'''
__lowerCAmelCase : Optional[int] = generate_key(UpperCAmelCase_ , UpperCAmelCase_ )
__lowerCAmelCase : List[Any] = cipher_text(UpperCAmelCase_ , UpperCAmelCase_ )
print(f"""Encrypted Text = {s}""" )
print(f"""Original Text = {original_text(UpperCAmelCase_ , UpperCAmelCase_ )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 492 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class snake_case ( unittest.TestCase ):
    """Runs accelerate's external-deps metric script under several launch modes.

    Fixes: all five methods previously shared one name (only the last
    definition survived and none matched unittest's ``test_*`` discovery
    prefix), the setup values were assigned to throwaway locals instead of
    ``self`` attributes read by the tests, and the torchrun command list was
    passed via an undefined name.
    """

    def setUp(self):
        # Locate the test script that ships inside accelerate, and import it
        # so the CPU/GPU tests can call its main() directly.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metrics_cpu_noop(self):
        debug_launcher(self.test_metrics.main , num_processes=1 )

    @require_cpu
    def test_metrics_cpu_multi(self):
        debug_launcher(self.test_metrics.main )

    @require_single_gpu
    def test_metrics_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metrics_gpu_multi(self):
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
| 675 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Fixes: logger and the archive map were bound to the same name, so the
# logger was clobbered immediately after creation.
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    """Configuration for a RoFormer model.

    Fixes: the class previously collided by name with the ONNX config class
    below (the second definition clobbered the first), inherited from an
    undefined name instead of ``PretrainedConfig``, and stored ``model_type``
    under a scrambled attribute name.
    """

    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        # Fall back to hidden_size when no separate embedding size is given.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoFormer.

    Fixes: the class previously shadowed the model config class by sharing
    its scrambled name; the multiple-choice dynamic-axis dict was clobbered
    by an unconditional reassignment; the property carried a wrong name and
    return annotation.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The "multiple-choice" task adds a choice axis between batch and
        # sequence; every other task uses (batch, sequence) only.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 291 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
# Model predictions and reference summaries used by the tests below.
# Fixes: both lists were bound to the same scrambled name, so the
# predictions list was clobbered by the targets list.
PRED = [
    "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
    " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
    " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]

TGT = [
    "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
    " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]
def test_disaggregated_scores_are_deterministic():
    """With bootstrap_aggregation=False, per-example rouge2 f-measures must
    not depend on which other rouge keys were requested.

    Fixes: scrambled function name (all six tests shared one name, so only
    the last survived) and undefined argument names.
    """
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_ra = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean()
    )
def test_newline_cls_points():
    """rougeLsum is sentence-boundary aware, so splitting on newlines must
    strictly improve the score on multi-sentence summaries."""
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k] )[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k] )[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    """rouge1/rouge2/rougeL ignore sentence boundaries, so newline handling
    must not change their scores."""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    """Single-sentence inputs have no boundaries to split, so newline_sep
    must have no effect at all."""
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    """Pegasus-style outputs mark sentence breaks with ``<n>``; the default
    newline handling must score them higher than newline_sep=False."""
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False )["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"] )["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    """calculate_rouge_path must aggregate by default and return per-example
    scores (a defaultdict) when bootstrap_aggregation=False."""
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False)
    assert isinstance(metrics_default_dict, defaultdict)
| 675 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the ViT-MSN model.
# Fixes: the structure dict and the model list were bound to the same
# scrambled name (so the dict was clobbered), `_import_structure` was
# referenced but never defined, and the _LazyModule was assigned to a
# variable instead of being installed into sys.modules.
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 328 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

# Original-checkpoint key -> HF model key.  A "*" wildcard stands for a
# quantizer/encoder/decoder layer index that is substituted at load time.
# Fixes: every mapping below was bound to the same scrambled name, so the
# merged MAPPING_24K/MAPPING_48K tables referenced undefined constants.
MAPPING_QUANTIZER = {
    "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
    "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
    "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
    "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
    "encoder.model.0.conv.conv": "encoder.layers.0.conv",
    "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
    "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
    "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
    "encoder.model.3.conv.conv": "encoder.layers.3.conv",
    "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
    "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
    "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
    "encoder.model.6.conv.conv": "encoder.layers.6.conv",
    "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
    "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
    "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
    "encoder.model.9.conv.conv": "encoder.layers.9.conv",
    "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
    "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
    "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
    "encoder.model.12.conv.conv": "encoder.layers.12.conv",
    "encoder.model.13.lstm": "encoder.layers.13.lstm",
    "encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
# Extra norm layers that only exist in the 48 kHz (time_group_norm) variant.
MAPPING_ENCODER_48K = {
    "encoder.model.0.conv.norm": "encoder.layers.0.norm",
    "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
    "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
    "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
    "encoder.model.3.conv.norm": "encoder.layers.3.norm",
    "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
    "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
    "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
    "encoder.model.6.conv.norm": "encoder.layers.6.norm",
    "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
    "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
    "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
    "encoder.model.9.conv.norm": "encoder.layers.9.norm",
    "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
    "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
    "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
    "encoder.model.12.conv.norm": "encoder.layers.12.norm",
    "encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
    "decoder.model.0.conv.conv": "decoder.layers.0.conv",
    "decoder.model.1.lstm": "decoder.layers.1.lstm",
    "decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
    "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
    "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
    "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
    "decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
    "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
    "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
    "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
    "decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
    "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
    "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
    "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
    "decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
    "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
    "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
    "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
    "decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
    "decoder.model.0.conv.norm": "decoder.layers.0.norm",
    "decoder.model.3.convtr.norm": "decoder.layers.3.norm",
    "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
    "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
    "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
    "decoder.model.6.convtr.norm": "decoder.layers.6.norm",
    "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
    "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
    "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
    "decoder.model.9.convtr.norm": "decoder.layers.9.norm",
    "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
    "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
    "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
    "decoder.model.12.convtr.norm": "decoder.layers.12.norm",
    "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
    "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
    "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
    "decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy *value* into the parameter of *hf_pointer* addressed by dotted *key*.

    Args:
        hf_pointer: root HF module to walk into.
        key: dotted attribute path (e.g. ``"encoder.layers.0.conv"``).
        value: tensor from the original checkpoint.
        full_name: original checkpoint key, used only for error/log messages.
        weight_type: parameter name on the target module (``"weight"``,
            ``"weight_g"``, ``"bias_ih_l0"``, ``"running_mean"``, ...) or
            ``None`` to assign to the module object itself.

    Raises:
        ValueError: if the checkpoint tensor's shape does not match the
            target parameter's shape.

    Fixes: the original assigned every branch's value to a throwaway local
    instead of the model parameter; the 16-branch elif chain is collapsed
    into a single getattr, which is safe because the shape check above has
    already resolved the same attribute.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type is not None:
        getattr(hf_pointer, weight_type).data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    """Return True if checkpoint key *name* matches any pattern in *ignore_keys*.

    Patterns: ``"prefix.*"`` matches keys starting with ``"prefix."``;
    ``"a.*.b"`` matches keys containing both ``"a"`` and ``"b"``; any other
    pattern matches as a plain substring.

    Fixes: the original body referenced undefined placeholder names for both
    parameters.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    """Load every tensor from an original EnCodec state dict into *hf_model*.

    Keys are translated through MAPPING_24K / MAPPING_48K; unmatched keys are
    collected and logged as unused.

    Fixes: the model-name test was ``== "encodec_24khz" or "encodec_32khz"``
    (always true, so the 48 kHz mapping was unreachable), and the mapping /
    weight-type locals were assigned to throwaway names.
    """
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                # Infer which parameter of the target module this tensor is.
                # The most specific substrings must be tested first.
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original EnCodec checkpoint to the HF format.

    Args:
        model_name: one of "encodec_24khz", "encodec_32khz", "encodec_48khz".
        checkpoint_path: path to the original torch checkpoint.
        pytorch_dump_folder_path: output directory for the HF model + feature extractor.
        config_path: optional path to an existing HF config to start from.
        repo_id: optional hub repo to push the converted artifacts to.

    Fixes: every config field below was assigned to a throwaway local instead
    of the config object, and the model/feature-extractor/checkpoint locals
    were undefined placeholder names.
    NOTE(review): the config attribute names follow EncodecConfig —
    confirm against the installed transformers version.
    """
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # Fixes: the parser and parsed args were bound to throwaway names while
    # the code below referenced the undefined names `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="encodec_24khz",
        type=str,
        help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 675 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Build a random benchmark instance: 10 ints in [-1000, 1000] plus a
    target value in [-5000, 5000].

    Fixes: the function was named with a scrambled placeholder while the
    module-level call site uses ``make_dataset``.
    """
    arr = [randint(-1_000, 1_000) for _ in range(10)]
    target = randint(-5_000, 5_000)
    return (arr, target)
a__ = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: scan every ordered triple (O(n^3)).

    Returns the matching triple sorted ascending, or ``(0, 0, 0)`` if no
    triple of distinct positions sums to *target*.

    Fixes: the function was named with a scrambled placeholder while the
    benchmark snippets import ``triplet_sum1``; locals were undefined names.
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort + two-pointer scan (O(n^2)).  Sorts *arr* in place.

    Returns the matching triple in ascending order, or ``(0, 0, 0)`` if no
    triple sums to *target*.

    Fixes: scrambled function name (the benchmark imports ``triplet_sum2``)
    and undefined locals; the triple sum is now computed once per iteration
    instead of three times.
    """
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            current = arr[i] + arr[left] + arr[right]
            if current == target:
                return (arr[i], arr[left], arr[right])
            if current < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    """Benchmark both triplet-sum implementations against the shared
    ``dataset`` fixture and return their best-of-5 times.

    Fixes: scrambled function name (the __main__ guard calls
    ``solution_times``) and undefined locals.
    """
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Fixes: the benchmark result was bound to a throwaway name while the
    # prints below referenced the undefined name `times`.
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
| 477 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Read a pyspark DataFrame into a dataset via the Spark builder.

    Fixes: the class inherited from an undefined placeholder instead of
    ``AbstractDatasetReader``, the constructor stored its options in
    throwaway locals instead of the ``self`` attributes read by ``read``,
    and the read method carried a scrambled name.
    """

    def __init__(
        self,
        df,
        split=None,
        features=None,
        streaming=True,
        cache_dir=None,
        keep_in_memory=False,
        working_dir=None,
        load_from_cache_file=True,
        file_format="arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs, )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs, )

    def read(self):
        """Return the dataset — streamed lazily, or built (and cached) first."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        # Force a rebuild when the caller opted out of the cache.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format, )
        return self.builder.as_dataset(split=self.split )
| 675 | 0 |
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    """Return the Hamming distance between two equal-length strings.

    Raises:
        ValueError: if the strings differ in length.

    Fixes: the original declared the same placeholder name for both
    parameters (a SyntaxError) and both zip targets, so the comparison
    ``chara != chara`` was always False.
    """
    if len(string1) != len(string2):
        raise ValueError('String lengths must match!' )
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
    # Run the doctest examples embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 368 |
'''simple docstring'''
def solution() -> int:
    """Project Euler 19: count the Sundays that fell on the first of a month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000).

    Walks week by week from 1 Jan 1901 (``day`` starts at 6 because the year
    begins on a Tuesday and the first Sunday is the 6th), rolling ``day``
    over month and year boundaries.

    Fixes: scrambled function name (the __main__ guard calls ``solution``)
    and undefined locals.
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # Leap year: February has 29 days.
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
| 675 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import structure for the BARTpho tokenizer.
# Fixes: the structure dict and the tokenizer list were bound to the same
# scrambled name (so the dict was clobbered), and the _LazyModule was
# assigned to a variable instead of being installed into sys.modules.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 551 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

# Fixes: logger and the archive map were bound to the same name, so the
# logger was clobbered immediately after creation.
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = "deformable_detr"
_lowerCamelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=3 , UpperCamelCase=300 , UpperCamelCase=1024 , UpperCamelCase=6 , UpperCamelCase=1024 , UpperCamelCase=8 , UpperCamelCase=6 , UpperCamelCase=1024 , UpperCamelCase=8 , UpperCamelCase=0.0 , UpperCamelCase=True , UpperCamelCase="relu" , UpperCamelCase=256 , UpperCamelCase=0.1 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.02 , UpperCamelCase=1.0 , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase="sine" , UpperCamelCase="resnet50" , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=4 , UpperCamelCase=4 , UpperCamelCase=4 , UpperCamelCase=False , UpperCamelCase=300 , UpperCamelCase=False , UpperCamelCase=1 , UpperCamelCase=5 , UpperCamelCase=2 , UpperCamelCase=1 , UpperCamelCase=1 , UpperCamelCase=5 , UpperCamelCase=2 , UpperCamelCase=0.1 , UpperCamelCase=0.25 , UpperCamelCase=False , **UpperCamelCase , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowerCamelCase_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = backbone_config.get("model_type" )
lowerCamelCase_ = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase_ = config_class.from_dict(UpperCamelCase )
lowerCamelCase_ = use_timm_backbone
lowerCamelCase_ = backbone_config
lowerCamelCase_ = num_channels
lowerCamelCase_ = num_queries
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = d_model
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = encoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = decoder_layers
lowerCamelCase_ = decoder_attention_heads
lowerCamelCase_ = dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = activation_function
lowerCamelCase_ = init_std
lowerCamelCase_ = init_xavier_std
lowerCamelCase_ = encoder_layerdrop
lowerCamelCase_ = auxiliary_loss
lowerCamelCase_ = position_embedding_type
lowerCamelCase_ = backbone
lowerCamelCase_ = use_pretrained_backbone
lowerCamelCase_ = dilation
# deformable attributes
lowerCamelCase_ = num_feature_levels
lowerCamelCase_ = encoder_n_points
lowerCamelCase_ = decoder_n_points
lowerCamelCase_ = two_stage
lowerCamelCase_ = two_stage_num_proposals
lowerCamelCase_ = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowerCamelCase_ = class_cost
lowerCamelCase_ = bbox_cost
lowerCamelCase_ = giou_cost
# Loss coefficients
lowerCamelCase_ = mask_loss_coefficient
lowerCamelCase_ = dice_loss_coefficient
lowerCamelCase_ = bbox_loss_coefficient
lowerCamelCase_ = giou_loss_coefficient
lowerCamelCase_ = eos_coefficient
lowerCamelCase_ = focal_alpha
lowerCamelCase_ = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase )
@property
def snake_case(self):
    """Alias for the number of attention heads used by the encoder layers."""
    heads = self.encoder_attention_heads
    return heads
@property
def snake_case(self):
    """Alias for the model's hidden size (``d_model``)."""
    hidden_size = self.d_model
    return hidden_size
def snake_case(self):
    """Serialize this configuration to a plain Python dict.

    The nested ``backbone_config`` (if any) is serialized recursively, and the
    class-level ``model_type`` is recorded so the config can be re-instantiated.

    Returns:
        dict: all configuration attributes, JSON-serializable.
    """
    # Deep-copy so callers can mutate the result without touching the config.
    # Fixes the broken original: the copy and the dict-key writes were dropped,
    # leaving `output` undefined at the `return`.
    output = copy.deepcopy(self.__dict__)
    if self.backbone_config is not None:
        # Sub-configs must be stored as dicts, not live config objects.
        output["backbone_config"] = self.backbone_config.to_dict()
    output["model_type"] = self.__class__.model_type
    return output
| 675 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    """Builds tiny Swinv2 configs/inputs and runs shape checks for the model tests.

    Restored from the obfuscated original: instance-attribute assignments had been
    turned into throwaway locals and all methods shared one mangled name, while the
    sibling test class calls ``SwinvaModelTester`` and the real method names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one tiny forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small Swinv2Config from the tester's hyper-parameters."""
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            # fixed: the original passed the misspelled kwarg `path_norm`, which
            # PretrainedConfig's **kwargs swallowed silently.
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last hidden state shape."""
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Each Swin stage halves the spatial resolution (factor 4 in tokens).
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Check the masked-image-modeling head output, incl. greyscale input."""
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check the classification head logits shape."""
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model tests for Swinv2, driven by SwinvaModelTester and the common mixins.

    Restored from the obfuscated original: class attributes were all named ``A``,
    every method shared one name, and locals/attribute targets had been dropped.
    """

    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason='''Swinv2 does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if hasattr(self.model_tester, '''num_hidden_states_types'''):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        """Shared shape checks for hidden_states and reshaped_hidden_states."""
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, '''expected_num_hidden_layers''', len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim],)
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim],)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        # With patch_size 3 the image is not evenly divisible, so it gets padded.
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=F"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the released swinv2-tiny checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Only build the processor when vision deps are installed.
        return (
            AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''').to(
            torch_device)
        image_processor = self.default_image_processor
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 501 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """Minimal Lightning wrapper matching the training-time checkpoint layout.

    Only the attributes needed to load the QA checkpoint's ``state_dict`` are
    recreated: the Longformer backbone and the span-prediction head. Restored
    from the obfuscated original, which dropped the ``self.*`` targets (so
    ``self.model`` was read but never set) and lost the ``LightningModel`` name
    that the conversion function below instantiates.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2  # start/end logits for extractive QA
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # Implemented in the original training script; not needed for conversion.
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    """Convert a PyTorch-Lightning Longformer-QA checkpoint into a Transformers model.

    Restored from the obfuscated original, which had duplicate parameter names
    (a SyntaxError), undefined locals, and a def name that did not match the
    ``convert_longformer_qa_checkpoint_to_pytorch`` call in ``__main__``.

    Args:
        longformer_model: model identifier of the Longformer backbone.
        longformer_question_answering_ckpt_path: path to the Lightning checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    # load longformer model from model identifier
    model = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(model)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''')
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the conversion.
    # Restored from the obfuscated original, where the parser/args bindings were
    # lost (both had been renamed to `a_`, leaving `parser` undefined).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--longformer_model""",
        default=None,
        type=str,
        required=True,
        help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
    )
    parser.add_argument(
        """--longformer_question_answering_ckpt_path""",
        default=None,
        type=str,
        required=True,
        help="""Path the official PyTorch Lightning Checkpoint.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 675 | 0 |
def nor_gate(input_1: int, input_2: int) -> int:
    """Return the output of a two-input NOR gate (1 only when both inputs are 0).

    Restored from the obfuscated original, which had duplicate parameter names
    (a SyntaxError) and referenced an undefined name in the body; the truth-table
    printer below calls ``nor_gate``.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(1, 0)
    0
    """
    # Chained comparison: true exactly when input_1 == input_2 == 0.
    return int(input_1 == input_2 == 0)
def main() -> None:
    """Print the full truth table of the NOR gate.

    Named ``main`` because the ``__main__`` guard below calls ``main()``; the
    obfuscated original reused the gate function's name, shadowing it.
    """
    print("""Truth Table of NOR Gate:""")
    print("""| Input 1 | Input 2 | Output |""")
    print(f'''| 0 | 0 | {nor_gate(0 , 0 )} |''')
    print(f'''| 0 | 1 | {nor_gate(0 , 1 )} |''')
    print(f'''| 1 | 0 | {nor_gate(1 , 0 )} |''')
    print(f'''| 1 | 1 | {nor_gate(1 , 1 )} |''')
if __name__ == "__main__":
    # Run the module's doctests, then print the NOR truth table.
    import doctest

    doctest.testmod()
    # NOTE(review): `main` must be defined at module level above — verify the
    # truth-table printer actually carries that name.
    main()
| 651 |
"""Lazy import structure for the CTRL model (config, tokenizer, PT and TF modeling)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Map submodule name -> public names; consumed by _LazyModule so the heavy
# frameworks are only imported on first attribute access. Restored from the
# obfuscated original, which referenced `_import_structure` without ever
# defining it and dropped the dict-key targets of the framework branches.
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 675 | 0 |
"""Lazy import structure for the EfficientNet model (config, image processor, modeling)."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Map submodule name -> public names; consumed by _LazyModule below. Restored
# from the obfuscated original, which referenced `_import_structure` without
# defining it and dropped the dict-key targets of the optional branches.
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 143 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a_ : Any = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for GPT-SW3, driven by TokenizerTesterMixin.

    Restored from the obfuscated original: the mixin-read class attributes
    (``tokenizer_class`` etc.) had all been renamed to one placeholder, and the
    fixture path (module-level ``a_``) was referenced by an undefined name.
    """

    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing; `a_` is the module-level
        # path to the byte-fallback test model.
        tokenizer = GPTSwaTokenizer(a_, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(a_)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."], )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(a_)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        expected_encoding = {"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences, )
| 675 | 0 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( lowercase ) -> Tuple:
__lowerCAmelCase = 2
__lowerCAmelCase = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(UpperCAmelCase_ )
if n > 1:
factors.append(UpperCAmelCase_ )
return factors
if __name__ == "__main__":
    # Verify the doctest examples in this module when run as a script.
    import doctest

    doctest.testmod()
| 689 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = ["image_processor", "tokenizer"]
_lowerCamelCase = "OwlViTImageProcessor"
_lowerCamelCase = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
    """Wrap an image processor and a tokenizer into a single processor.

    Restored from the obfuscated original, where ``feature_extractor`` was read
    but its two bindings had been dropped.

    Args:
        image_processor: the image processor instance (required).
        tokenizer: the tokenizer instance (required).
        **kwargs: may contain the deprecated ``feature_extractor`` alias for
            ``image_processor``.

    Raises:
        ValueError: if no image processor or no tokenizer is provided.
    """
    feature_extractor = None
    if "feature_extractor" in kwargs:
        warnings.warn(
            "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
            " instead.",
            FutureWarning,
        )
        feature_extractor = kwargs.pop("feature_extractor")

    # Prefer the explicit argument; fall back to the deprecated alias.
    image_processor = image_processor if image_processor is not None else feature_extractor
    if image_processor is None:
        raise ValueError("You need to specify an `image_processor`.")
    if tokenizer is None:
        raise ValueError("You need to specify a `tokenizer`.")

    super().__init__(image_processor, tokenizer)
def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
    """Prepare text queries, query images and/or target images for the model.

    Restored from the obfuscated original, which had duplicate parameter names
    (a SyntaxError) and dropped the bindings of ``encodings``/``input_ids``/
    ``encoding``/``image_features``.

    Args:
        text: a string, list of strings, or nested list of strings (one list
            of queries per image); nested lists are padded with " " so every
            image has the same number of queries.
        query_images: image(s) used as visual queries.
        images: target image(s) to run detection on.
        padding: tokenizer padding strategy.
        return_tensors: one of "np", "jax", "pt", "tf".

    Returns:
        BatchEncoding holding `input_ids`/`attention_mask` (from text),
        `query_pixel_values` (from query images) and/or `pixel_values`.

    Raises:
        ValueError: if none of text/query_images/images is given, or the
            requested tensor framework is unavailable.
        TypeError: if `text` has an unsupported structure.
    """
    if text is None and query_images is None and images is None:
        raise ValueError(
            "You have to specify at least one text or query image or image. All three cannot be none.")
    if text is not None:
        if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
            encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
        elif isinstance(text, List) and isinstance(text[0], List):
            encodings = []
            # Maximum number of queries across batch
            max_num_queries = max([len(t) for t in text])
            # Pad all batch samples to max number of text queries
            for t in text:
                if len(t) != max_num_queries:
                    t = t + [" "] * (max_num_queries - len(t))
                encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                encodings.append(encoding)
        else:
            raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

        # Concatenate the per-sample encodings along the batch axis with
        # whichever framework the caller requested.
        if return_tensors == "np":
            input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
            attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
        elif return_tensors == "jax" and is_flax_available():
            import jax.numpy as jnp

            input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
            attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
        elif return_tensors == "pt" and is_torch_available():
            import torch

            input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
            attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
        elif return_tensors == "tf" and is_tf_available():
            import tensorflow as tf

            input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
            attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
        else:
            raise ValueError("Target return tensor type could not be returned")

        encoding = BatchEncoding()
        encoding["input_ids"] = input_ids
        encoding["attention_mask"] = attention_mask

    if query_images is not None:
        encoding = BatchEncoding()
        query_pixel_values = self.image_processor(
            query_images, return_tensors=return_tensors, **kwargs).pixel_values
        encoding["query_pixel_values"] = query_pixel_values

    if images is not None:
        image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

    if text is not None and images is not None:
        encoding["pixel_values"] = image_features.pixel_values
        return encoding
    elif query_images is not None and images is not None:
        encoding["pixel_values"] = image_features.pixel_values
        return encoding
    elif text is not None or query_images is not None:
        return encoding
    else:
        return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return self.image_processor.post_process(*UpperCamelCase , **UpperCamelCase )
def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_object_detection(*UpperCamelCase , **UpperCamelCase )
def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*UpperCamelCase , **UpperCamelCase )
def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def snake_case ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCamelCase , )
return self.image_processor_class
@property
def snake_case ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCamelCase , )
return self.image_processor
| 675 | 0 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape Amazon.in search results for `product` into a `DataFrame`.

    Columns: title, link, current price, rating, MRP and computed discount
    percentage. Entries missing a rating/MRP get placeholder values.

    The obfuscated original bound the parameter under one name and read it
    under another (NameError), and mangled `item.h2` into `item.ha`.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div", attrs={"class": "s-result-item", "data-component-type": "s-search-result"}
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 1_0_0
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            # Malformed search-result entry: skip its fields, keep scraping.
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Blank out fields that could not be scraped so the CSV stays rectangular.
        # NOTE(review): reconstructed from the original's two ' ' assignments — verify columns.
        data_frame.loc[data_frame["Current Price of the product"] == "", "Current Price of the product"] = " "
        data_frame.loc[data_frame["MRP of the product"] == "", "MRP of the product"] = " "
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # Scrape a sample product and persist the results as CSV.
    # (The obfuscated original assigned one name and read another.)
    product = "headphones"
    get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
| 47 |
'''simple docstring'''
import os
import sys
import unittest
# Locate the repository root so the `utils` scripts are importable; the body
# below reads `git_repo_path`, which the obfuscated original never bound.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

# Expected shapes of the generated dummy objects (constant / class / function).
DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class snake_case(unittest.TestCase):
    """Unit tests for the helpers exposed by the `check_dummies` script.

    The obfuscated original named every method `snake_case` (so only the last
    definition survived and none were discovered by unittest) and read
    undefined locals; names restored to `test_*` with descriptive variables.
    """

    def test_find_backend(self):
        # A plain import-structure line belongs to no backend.
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):")
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):")
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 675 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class __lowercase(TaskTemplate):
    """Task template for automatic speech recognition.

    Maps a dataset's audio and transcription columns onto the canonical
    ``audio`` / ``transcription`` schema. The obfuscated original used an
    undefined name as base class and `frozen=` value, gave all five class
    attributes the same name, and checked `isinstance` against the features
    dict instead of `Audio`.
    """

    # `task` is serialized even when it keeps its default value.
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def UpperCamelCase__(self, features) -> int:
        """Return a deep copy of this template whose input schema uses the
        dataset's actual `Audio` feature for `self.audio_column`.

        Raises:
            ValueError: if the column is missing or not an `Audio` feature.
        """
        if self.audio_column not in features:
            raise ValueError(f"""Column {self.audio_column} is not present in features.""")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"""Column {self.audio_column} is not an Audio type.""")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # Bypass the frozen dataclass to install the per-dataset schema.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Dataset column name -> canonical template column name."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 492 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class snake_case(metaclass=DummyObject):
    """Placeholder emitted when the `onnx` backend is unavailable.

    Any instantiation or factory call raises an informative ImportError via
    `requires_backends`. The obfuscated original used an undefined name as
    metaclass (the import grounds `DummyObject`) and gave both classmethods
    the same name; factory names follow the generated-dummy convention —
    verify against `utils/dummy_onnx_objects.py`.
    """

    # Read by the `DummyObject` metaclass to name the missing backend.
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 675 | 0 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
# Smallest candidate considered when searching for a primitive root.
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    """Return a random primitive-root candidate modulo ``p_val``.

    Rejects candidates ``g`` with ``g^2 ≡ 1`` or ``g^p_val ≡ 1 (mod p_val)``.
    The obfuscated original never bound the name it read (NameError); all
    four functions were also named ``a``, clobbering one another.
    """
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    """Generate an ElGamal (public, private) key pair of ``key_size`` bits."""
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_a = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_a2 = cryptomath.find_mod_inverse(pow(e_a, d, p), p)

    public_key = (key_size, e_a, e_a2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``; abort if they exist."""
    if os.path.exists(f'''{name}_pubkey.txt''') or os.path.exists(f'''{name}_privkey.txt'''):
        print('\nWARNING:')
        print(
            f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            'Use a different name or delete these files and re-run this program.')
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f'''\nWriting public key to file {name}_pubkey.txt...''')
    with open(f'''{name}_pubkey.txt''', 'w') as fo:
        fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''')
    print(f'''Writing private key to file {name}_privkey.txt...''')
    with open(f'''{name}_privkey.txt''', 'w') as fo:
        fo.write(f'''{private_key[0]},{private_key[1]}''')


def main() -> None:
    """Entry point: generate the 'elgamal' key files."""
    print('Making key files...')
    make_key_files('elgamal', 2048)
    print('Key files generation successful')


if __name__ == "__main__":
    main()
| 291 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    """Builds a tiny random LayoutLM config with matching inputs and runs
    shape checks for each task head.

    The class name is restored from its instantiation in `setUp`
    (`TFLayoutLMModelTester(self)`); the obfuscated original's `__init__`
    reused a single name for every parameter (a SyntaxError) and all methods
    shared one name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Create a random config plus input tensors (bbox coords made legal)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Base model: call with progressively fewer inputs, check output shapes."""
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common test mixin: returns (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline model tests for TF LayoutLM.

    Bases/attribute names restored: the obfuscated original inherited from an
    undefined name twice, passed an undefined `config_class`, and named every
    test method identically (so only the last survived).
    """

    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): boolean/int flags mapped to the conventional mixin knobs — confirm.
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    """Return a fixed batch of 2 sequences (ids, mask, bboxes, segment ids,
    token labels) for LayoutLM integration tests.

    Name restored from its call sites (`prepare_layoutlm_batch_inputs()`).
    """
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] )  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] )  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the pretrained
    `microsoft/layoutlm-base-uncased` checkpoint (undefined kwarg names and
    colliding method names restored from the surrounding calls)."""

    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_pooling = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_pooling, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 675 | 0 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for one array position in the parallel odd-even transposition sort.

    At each of the 10 rounds it exchanges its value with the left or right
    neighbor (depending on round parity) and keeps the min (left side) or max
    (right side). The final value is sent back through `result_pipe`.
    Parameter names restored: the obfuscated original reused one name for all
    seven (a SyntaxError) and read unbound locals.
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort `arr` ascending with one process per element; returns the sorted list."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ) )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main() -> None:
    """Demo: sort the reversed list 10..1 and print before/after."""
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)


if __name__ == "__main__":
    main()
| 328 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
a_ : int = """docs/source/en/_toctree.yml"""
def __snake_case ( UpperCAmelCase_ : Optional[int] ):
lowerCamelCase_ = defaultdict(UpperCAmelCase_ )
lowerCamelCase_ = []
lowerCamelCase_ = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"local": doc["local"], "title": doc["title"]} )
else:
new_doc_list.append(UpperCAmelCase_ )
lowerCamelCase_ = new_doc_list
lowerCamelCase_ = [key for key, value in counts.items() if value > 1]
lowerCamelCase_ = []
for duplicate_key in duplicates:
lowerCamelCase_ = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} )
if len(UpperCAmelCase_ ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] )
lowerCamelCase_ = sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(UpperCAmelCase_ ) > 1:
raise ValueError("{doc_list} has two 'overview' docs which is not allowed." )
overview_doc.extend(UpperCAmelCase_ )
# Sort
return overview_doc
def __snake_case ( UpperCAmelCase_ : bool=False ):
    """Check (and, when ``UpperCAmelCase_`` is True, fix) the Schedulers section
    of the documentation table of content.

    ``UpperCAmelCase_`` is the overwrite flag (the CLI's ``--fix_and_overwrite``).
    """
    # Fix: the original opened the boolean *flag* as a file and bound every
    # intermediate value to one mangled local; read the TOC from the module-level
    # path constant ``a_`` and restore distinct locals.
    with open(a_ , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["sections"]
    # NOTE(review): ``clean_doc_toc`` is the helper defined above (its def was
    # itself mangled to ``__snake_case``) — confirm the intended module name.
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if UpperCAmelCase_:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc
    if diff:
        if UpperCAmelCase_:
            content[api_idx]["sections"] = api_doc
            with open(a_ , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
def __snake_case ( UpperCAmelCase_ : bool=False ):
    """Check (and, when ``UpperCAmelCase_`` is True, fix) the Pipelines section
    of the documentation table of content, including each sub-pipeline section.
    """
    # Fix: same mangling as the scheduler checker — the flag was opened as a
    # file and all intermediates shared one local; restored below.
    with open(a_ , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if UpperCAmelCase_:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if UpperCAmelCase_:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs
    if diff:
        if UpperCAmelCase_:
            content[api_idx]["sections"] = api_doc
            with open(a_ , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
    # Fix: the parser was bound to ``a_`` (clobbering the TOC-path constant used
    # by the checkers) while the next line read an unbound ``parser``.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    # NOTE(review): both checker defs above were mangled to ``__snake_case``, so
    # these names do not resolve in this chunk — confirm the intended functions.
    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 675 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# CI hub configuration for the fixtures below.
# Fix: all seven constants were bound to the single mangled name ``a__`` (each
# assignment clobbering the previous one) while the fixtures reference the real
# names (``CI_HUB_USER``, ``CI_HUB_USER_TOKEN``, ...).  The last URL template had
# its ``{filename}`` placeholder replaced by the literal ``(unknown)`` — restored.
CI_HUB_USER = """__DUMMY_TRANSFORMERS_USER__"""
CI_HUB_USER_FULL_NAME = """Dummy User"""
CI_HUB_USER_TOKEN = """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"""
CI_HUB_ENDPOINT = """https://hub-ci.huggingface.co"""
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
CI_HUB_TOKEN_PATH = Path("""~/.huggingface/hub_ci_token""").expanduser()
# Backward-compatible alias: ``a__`` previously ended up bound to the token path.
a__ = CI_HUB_TOKEN_PATH
@pytest.fixture
# Overrides huggingface_hub's download URL template so calls hit the CI endpoint.
def lowercase ( SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
    # NOTE(review): ``monkeypatch`` and ``UpperCAmelCase_`` are unresolved as
    # written — the parameter and the template constant were name-mangled
    # (presumably ``monkeypatch`` and the CI URL-template constant); confirm.
    monkeypatch.setattr(
        """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , UpperCAmelCase_ )
@pytest.fixture
# Points the `datasets` library's hub endpoint and datasets URL at the CI hub.
def lowercase ( SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
    # NOTE(review): ``monkeypatch``/``UpperCAmelCase_`` are unresolved mangled
    # names (presumably monkeypatch plus the CI endpoint/URL constants); confirm.
    monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , UpperCAmelCase_ )
    monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , UpperCAmelCase_ )
@pytest.fixture
# Redirects HfFolder's token path (so tests never touch the real user token file).
def lowercase ( SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
    # NOTE(review): ``monkeypatch``/``UpperCAmelCase_`` are unresolved mangled
    # names (presumably monkeypatch and the CI token-path constant); confirm.
    monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , UpperCAmelCase_ )
@pytest.fixture
# Installs the CI token for the duration of a test, then deletes it.
def lowercase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
    # NOTE(review): the duplicate parameter names make this ``def`` a
    # SyntaxError, and the token passed to save_token (``UpperCAmelCase_``) is
    # unresolved — the fixture's parameters/constants were name-mangled.
    HfFolder.save_token(UpperCAmelCase_ )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
# Session-scoped HfApi client bound to the CI hub endpoint.
def lowercase ( ) -> Dict:
    # NOTE(review): ``UpperCAmelCase_`` is unresolved — presumably the CI
    # endpoint constant; confirm.
    return HfApi(endpoint=UpperCAmelCase_ )
@pytest.fixture(scope="""session""" )
# Temporarily installs the CI user token; restores any pre-existing token after.
def lowercase ( SCREAMING_SNAKE_CASE__ : HfApi ) -> Any:
    _snake_case : Any = HfFolder.get_token()
    # NOTE(review): the saved token is bound to ``_snake_case`` above but read
    # back as ``previous_token`` below, and the save_token arguments are
    # unresolved — local names were mangled.  Confirm ``CI_HUB_USER_TOKEN`` is
    # defined at module level under that name.
    HfFolder.save_token(UpperCAmelCase_ )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(UpperCAmelCase_ )
@pytest.fixture
# Factory: returns a callable that deletes a dataset repo on the CI hub.
def lowercase ( SCREAMING_SNAKE_CASE__ : str ) -> Any:
    def _cleanup_repo(SCREAMING_SNAKE_CASE__ : Tuple ):
        # NOTE(review): ``hf_api`` and ``UpperCAmelCase_`` are unresolved mangled
        # references (presumably the HfApi fixture, the repo id and the CI
        # token); confirm.
        hf_api.delete_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type="""dataset""" )
    return _cleanup_repo
@pytest.fixture
# Context-manager factory: yields a repo id and guarantees cleanup afterwards.
def lowercase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[str]:
    @contextmanager
    def _temporary_repo(SCREAMING_SNAKE_CASE__ : Dict ):
        try:
            # NOTE(review): ``repo_id``, ``cleanup_repo`` and ``UpperCAmelCase_``
            # are unresolved — mangled references to the inner parameter and the
            # cleanup fixture; confirm.
            yield repo_id
        finally:
            cleanup_repo(UpperCAmelCase_ )
    return _temporary_repo
@pytest.fixture(scope="""session""" )
# Creates a private dataset repo containing a text file; yields its id; deletes
# it afterwards on a best-effort basis.
def lowercase ( SCREAMING_SNAKE_CASE__ : HfApi , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
    # NOTE(review): the duplicate parameter names are a SyntaxError, and
    # ``repo_name``, ``hf_api``, ``repo_id`` plus every ``UpperCAmelCase_``
    # argument are unresolved mangled names (api client, token, local file path,
    # repo id, private flag) — confirm against the upstream conftest.  Confirm
    # ``CI_HUB_USER`` is defined at module level under that name.
    _snake_case : Union[str, Any] = F'''repo_txt_data-{int(time.time() * 1_0e3 )}'''
    _snake_case : Optional[Any] = F'''{CI_HUB_USER}/{repo_name}'''
    hf_api.create_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type="""dataset""" , private=UpperCAmelCase_ )
    hf_api.upload_file(
        token=UpperCAmelCase_ , path_or_fileobj=str(UpperCAmelCase_ ) , path_in_repo="""data/text_data.txt""" , repo_id=UpperCAmelCase_ , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
# Re-exports the session-scoped text-data repo under a function-scoped fixture.
def lowercase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ) -> List[Any]:
    # NOTE(review): duplicate parameter names (SyntaxError) and the returned name
    # is unresolved — parameters were mangled from the underlying fixture names.
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
# Creates a private dataset repo containing a zipped text file; yields its id;
# best-effort cleanup afterwards.
def lowercase ( SCREAMING_SNAKE_CASE__ : HfApi , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[Any]:
    # NOTE(review): duplicate parameter names (SyntaxError); ``repo_name``,
    # ``hf_api``, ``repo_id`` and the ``UpperCAmelCase_`` arguments are
    # unresolved mangled names — confirm against the upstream conftest.
    _snake_case : int = F'''repo_zipped_txt_data-{int(time.time() * 1_0e3 )}'''
    _snake_case : Dict = F'''{CI_HUB_USER}/{repo_name}'''
    hf_api.create_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type="""dataset""" , private=UpperCAmelCase_ )
    hf_api.upload_file(
        token=UpperCAmelCase_ , path_or_fileobj=str(UpperCAmelCase_ ) , path_in_repo="""data.zip""" , repo_id=UpperCAmelCase_ , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
# Re-exports the session-scoped zipped-text repo under a function-scoped fixture.
def lowercase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
    # NOTE(review): duplicate parameter names (SyntaxError) and the returned name
    # is unresolved — parameters were mangled from the underlying fixture names.
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
# Creates a private dataset repo containing a zipped image archive; yields its
# id; best-effort cleanup afterwards.
def lowercase ( SCREAMING_SNAKE_CASE__ : HfApi , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
    # NOTE(review): duplicate parameter names (SyntaxError); ``repo_name``,
    # ``hf_api``, ``repo_id`` and the ``UpperCAmelCase_`` arguments are
    # unresolved mangled names — confirm against the upstream conftest.
    _snake_case : Tuple = F'''repo_zipped_img_data-{int(time.time() * 1_0e3 )}'''
    _snake_case : Tuple = F'''{CI_HUB_USER}/{repo_name}'''
    hf_api.create_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type="""dataset""" , private=UpperCAmelCase_ )
    hf_api.upload_file(
        token=UpperCAmelCase_ , path_or_fileobj=str(UpperCAmelCase_ ) , path_in_repo="""data.zip""" , repo_id=UpperCAmelCase_ , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
# Re-exports the session-scoped zipped-image repo under a function-scoped fixture.
def lowercase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
    # NOTE(review): duplicate parameter names (SyntaxError) and the returned name
    # is unresolved — parameters were mangled from the underlying fixture names.
    return hf_private_dataset_repo_zipped_img_data_
| 477 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def __snake_case ( tokenizer_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs ):
    """Compute per-example token lengths for the train/val splits of a
    seq2seq dataset and pickle them to each split's ``len_file``.

    Fix: every parameter shared one mangled name (a SyntaxError) and the body
    read names (``consider_target``, ``max_lens``, ``dl``, ...) that were never
    bound; restore distinct parameter/local names.
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad = tok.pad_token_id

    def get_lens(ds ):
        # One pass over the split; the tqdm desc shows which len_file is built.
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens

    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
    # Fix: ``save_len_file`` is not defined in this file — the function above was
    # mangled to ``__snake_case``; expose it to fire under its actual name.
    fire.Fire(__snake_case)
| 675 | 0 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
# Tiny hub checkpoints used as "teachers" by the tests below.
# NOTE(review): both constants are bound to the same mangled name ``a__`` — the
# second assignment clobbers the first (upstream these were distinct
# TINY_BART / TINY_T5-style names), and the annotations do not match the
# ``str`` values.
a__ : List[Any] = """sshleifer/bart-tiny-random"""
a__ : int = """patrickvonplaten/t5-tiny-random"""
@require_torch
class __snake_case ( unittest.TestCase ):
    """Tests for ``make_student.create_student_by_copying_alternating_layers``.

    NOTE(review): all methods share the mangled name ``_snake_case`` (each def
    shadows the previous one, including the cached property), and every call
    passes an unbound ``UpperCamelCase_`` — presumably the tiny teacher
    checkpoint constants and literal flags; kept byte-identical.
    """

    @cached_property
    def _snake_case ( self ) -> Optional[Any]:
        # Presumably the teacher config loaded from a tiny checkpoint — confirm.
        return AutoConfig.from_pretrained(UpperCamelCase_ )

    def _snake_case ( self ) -> Tuple:
        # Student with one encoder/decoder layer copied from the teacher.
        snake_case__ , *snake_case__ = create_student_by_copying_alternating_layers(UpperCamelCase_ , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    def _snake_case ( self ) -> Any:
        snake_case__ , *snake_case__ = create_student_by_copying_alternating_layers(UpperCamelCase_ , tempfile.mkdtemp() , e=1 , d=UpperCamelCase_ )

    def _snake_case ( self ) -> Optional[int]:
        # d=None presumably keeps all decoder layers — confirm.
        snake_case__ , *snake_case__ = create_student_by_copying_alternating_layers(UpperCamelCase_ , tempfile.mkdtemp() , e=1 , d=UpperCamelCase_ )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    def _snake_case ( self ) -> Tuple:
        snake_case__ , *snake_case__ = create_student_by_copying_alternating_layers(UpperCamelCase_ , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    def _snake_case ( self ) -> Optional[Any]:
        # Both e and d unset is expected to be rejected.
        with self.assertRaises(UpperCamelCase_ ):
            create_student_by_copying_alternating_layers(UpperCamelCase_ , tempfile.mkdtemp() , e=UpperCamelCase_ , d=UpperCamelCase_ )
| 368 |
'''simple docstring'''
def __snake_case ( UpperCAmelCase_ : str ):
    """Return ``UpperCAmelCase_`` with repeated alphabetic characters removed
    (spaces are always kept, non-alphabetic characters are dropped).

    Fix: the loop iterated an unbound ``key`` and accumulated into an unbound
    ``key_no_dups``; bind the accumulator and iterate the parameter.
    """
    key_no_dups = ""
    for ch in UpperCAmelCase_:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def __snake_case ( UpperCAmelCase_ : str ):
    """Build the mixed-alphabet substitution map for keyword ``UpperCAmelCase_``.

    Fix: every intermediate value (alphabet, deduplicated key, offset,
    candidate char, result dict) was bound to one mangled local while later
    lines read the original names; restore one local per value.
    NOTE(review): ``remove_duplicates`` is not defined under that name in this
    chunk (the helper above was itself mangled to ``__snake_case``) — confirm.
    """
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(UpperCAmelCase_.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def __snake_case ( UpperCAmelCase_ : str , cipher_map : dict ):
    """Encipher ``UpperCAmelCase_`` with ``cipher_map``; unmapped characters
    pass through unchanged.

    Fix: both parameters shared one mangled name (a SyntaxError) and the
    generator read unbound ``ch``/``message`` names.
    """
    return "".join(cipher_map.get(ch , ch ) for ch in UpperCAmelCase_.upper() )
def __snake_case ( UpperCAmelCase_ : str , cipher_map : dict ):
    """Decipher ``UpperCAmelCase_`` by inverting ``cipher_map``; unmapped
    characters pass through unchanged.

    Fix: duplicate parameter names (a SyntaxError) and unbound locals restored.
    """
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in UpperCAmelCase_.upper() )
def __snake_case ( ):
    """Interactive driver: prompt for message, keyword and mode, then print
    the (de)ciphered result.

    Fix: each input was bound to one mangled local while later lines read the
    original names (``message``, ``key``, ``option``, ``func``).
    NOTE(review): ``encipher``/``decipher``/``create_cipher_map`` are not
    defined under those names in this chunk (all were mangled to
    ``__snake_case``) — confirm the intended module-level names.
    """
    message = input("Enter message to encode or decode: " ).strip()
    key = input("Enter keyword: " ).strip()
    option = input("Encipher or decipher? E/D:" ).strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option" )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: ``main`` is not defined in this file; the interactive driver defined
    # immediately above is the last binding of ``__snake_case``.
    __snake_case()
| 675 | 0 |
import os
import sys
import unittest

# Make the repo-level ``utils`` directory importable, then import the
# dummy-file checker under test.
# NOTE(review): the repo path is bound to the mangled name ``__UpperCamelCase``
# but read back as ``git_repo_path`` below (unresolved as written); the three
# template constants further down are all bound to the same mangled name, each
# clobbering the previous one (upstream they were distinct DUMMY_CONSTANT /
# DUMMY_CLASS / DUMMY_FUNCTION-style names).
__UpperCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCamelCase = os.path.join(git_repo_path, 'src', 'transformers')
# Template for a dummy module-level constant.
__UpperCamelCase = """
{0} = None
"""
# Template for a dummy class gated behind a backend.
__UpperCamelCase = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
# Template for a dummy function gated behind a backend.
__UpperCamelCase = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
class lowerCamelCase__ ( unittest.TestCase ):
    """Unit tests for the ``check_dummies`` utility.

    Fix: each method bound its intermediate results to one mangled name
    (``UpperCamelCase__``) while asserting on a different, unbound name
    (``snake_case``); restore consistent locals per method.

    NOTE(review): all four methods still share the name ``snake_case__`` so each
    definition shadows the previous one and only the last is callable; the
    names are kept to preserve the class's (mangled) interface — upstream they
    were presumably distinct ``test_*`` methods.
    """

    def snake_case__ ( self ):
        '''find_backend: None for unguarded lines, backend name(s) for guarded ones.'''
        backend = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" )
        self.assertIsNone(backend )
        backend = find_backend(" if not is_tokenizers_available():" )
        self.assertEqual(backend , "tokenizers" )
        backend = find_backend(" if not is_tensorflow_text_available():" )
        self.assertEqual(backend , "tensorflow_text" )
        backend = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" )
        self.assertEqual(backend , "sentencepiece_and_tokenizers" )
        backend = find_backend(
            " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" )
        self.assertEqual(backend , "sentencepiece_and_tensorflow_text" )
        backend = find_backend(
            " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" )
        self.assertEqual(backend , "sentencepiece_and_tokenizers_and_vision" )

    def snake_case__ ( self ):
        '''read_init: expected backend keys and a sample of objects per backend.'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" , objects )
        self.assertIn("tensorflow_text" , objects )
        self.assertIn("sentencepiece_and_tokenizers" , objects )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel" , objects["torch"] )
        self.assertIn("TFBertModel" , objects["tf"] )
        self.assertIn("FlaxBertModel" , objects["flax"] )
        self.assertIn("BertModel" , objects["torch"] )
        self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] )
        self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] )

    def snake_case__ ( self ):
        '''create_dummy_object: constant, function and class shapes.'''
        dummy_constant = create_dummy_object("CONSTANT" , "'torch'" )
        self.assertEqual(dummy_constant , "\nCONSTANT = None\n" )
        dummy_function = create_dummy_object("function" , "'torch'" )
        self.assertEqual(
            dummy_function , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass" , "'torch'" )
        self.assertEqual(dummy_class , expected_dummy_class )

    def snake_case__ ( self ):
        '''create_dummy_files: full generated file content for the torch backend.'''
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"] , expected_dummy_pytorch_file )
| 551 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( lowercase , unittest.TestCase ):
    """Test suite for the OpenAI GPT tokenizer (slow and fast variants).

    NOTE(review): the base class ``lowercase`` is not defined in this chunk
    (presumably TokenizerTesterMixin); the four class attributes below are all
    bound to the same mangled name ``_lowerCamelCase`` (each clobbers the
    previous — upstream: tokenizer_class, rust_tokenizer_class,
    test_rust_tokenizer, test_seqaseq-style flags), all methods share the
    mangled name ``snake_case`` (each def shadows the previous), and several
    bodies read unbound mangled names (``UpperCamelCase``, ``tokenizer``,
    ``tokens``, ``tokenizer_r``).  Code kept byte-identical.
    """
    _lowerCamelCase = OpenAIGPTTokenizer
    _lowerCamelCase = OpenAIGPTTokenizerFast
    _lowerCamelCase = True
    _lowerCamelCase = False

    def snake_case ( self ):
        """Write a toy BPE vocab/merges pair into the test tmp dir (setUp)."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCamelCase_ = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        # NOTE(review): the vocab list above is bound to ``lowerCamelCase_`` and
        # immediately clobbered; ``UpperCamelCase`` below is unbound.
        lowerCamelCase_ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
        lowerCamelCase_ = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(UpperCamelCase ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(UpperCamelCase ) )

    def snake_case ( self , UpperCamelCase ):
        """Return an (input, output) text pair for generic mixin tests."""
        return "lower newer", "lower newer"

    def snake_case ( self ):
        """Tokenize a word with the slow tokenizer and check tokens and ids."""
        lowerCamelCase_ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )

        lowerCamelCase_ = "lower"
        lowerCamelCase_ = ["low", "er</w>"]
        lowerCamelCase_ = tokenizer.tokenize(UpperCamelCase )
        self.assertListEqual(UpperCamelCase , UpperCamelCase )

        lowerCamelCase_ = tokens + ["<unk>"]
        lowerCamelCase_ = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )

    def snake_case ( self , UpperCamelCase=15 ):
        """Padding to max_length must raise for a tokenizer without a pad token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )

                # Simple input
                lowerCamelCase_ = "This is a simple input"
                lowerCamelCase_ = ["This is a simple input 1", "This is a simple input 2"]
                lowerCamelCase_ = ("This is a simple input", "This is a pair")
                lowerCamelCase_ = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Simple input
                self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Simple input
                self.assertRaises(
                    UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" , )

                # Pair input
                self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Pair input
                self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Pair input
                self.assertRaises(
                    UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" , )

    def snake_case ( self ):
        """Intentionally empty override (inherited test skipped)."""
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class snake_case ( lowercase ):
    """Variant of the tokenization test suite run only when ftfy/spacy are
    installed (inherits everything from the class above).

    NOTE(review): the base name ``lowercase`` is unresolved in this chunk and
    this ``class snake_case`` rebinds — and thus shadows — the test class of the
    same mangled name defined above.
    """
    pass
| 675 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase__ ( _lowerCAmelCase ,unittest.TestCase ):
    # Smoke tests for the ONNX Stable Diffusion img2img pipeline against a tiny
    # hub checkpoint — one test per scheduler, each comparing a 3x3 pixel slice
    # against hard-coded reference values.
    # NOTE(review): the base ``_lowerCAmelCase`` is unresolved in this chunk
    # (presumably OnnxPipelineTesterMixin); every method shares the mangled name
    # ``__UpperCamelCase`` (each def shadows the previous), and the bodies read
    # unbound mangled names (``image``, ``generator``, ``inputs``, ``pipe``,
    # ``image_slice``, ``expected_slice``, ``self.hub_checkpoint``,
    # ``self.get_dummy_inputs``).  Code kept byte-identical.
    A = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"  # tiny test checkpoint id

    def __UpperCamelCase ( self : int , UpperCamelCase_ : Dict=0 ) -> List[Any]:
        """Build deterministic dummy pipeline inputs, seeded by ``UpperCamelCase_``."""
        lowerCamelCase_ : str = floats_tensor((1, 3, 128, 128) , rng=random.Random(UpperCamelCase_ ) )
        lowerCamelCase_ : str = np.random.RandomState(UpperCamelCase_ )
        lowerCamelCase_ : str = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''strength''': 0.75,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs

    def __UpperCamelCase ( self : str ) -> Optional[int]:
        """Default scheduler: output shape and reference pixel slice."""
        lowerCamelCase_ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        lowerCamelCase_ : List[Any] = self.get_dummy_inputs()
        lowerCamelCase_ : Tuple = pipe(**UpperCamelCase_ ).images
        lowerCamelCase_ : Any = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        lowerCamelCase_ : Tuple = np.array([0.6_9643, 0.5_8484, 0.5_0314, 0.5_8760, 0.5_5368, 0.5_9643, 0.5_1529, 0.4_1217, 0.4_9087] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1

    def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
        """PNDM scheduler (skip_prk_steps) variant."""
        lowerCamelCase_ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCamelCase_ : Any = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCamelCase_ )
        pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        lowerCamelCase_ : Any = self.get_dummy_inputs()
        lowerCamelCase_ : Dict = pipe(**UpperCamelCase_ ).images
        lowerCamelCase_ : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        lowerCamelCase_ : Optional[Any] = np.array([0.6_1737, 0.5_4642, 0.5_3183, 0.5_4465, 0.5_2742, 0.6_0525, 0.4_9969, 0.4_0655, 0.4_8154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

    def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
        """LMS discrete scheduler variant (with a warmup pass)."""
        lowerCamelCase_ : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCamelCase_ : List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        # warmup pass to apply optimizations
        lowerCamelCase_ : str = pipe(**self.get_dummy_inputs() )
        lowerCamelCase_ : Union[str, Any] = self.get_dummy_inputs()
        lowerCamelCase_ : Dict = pipe(**UpperCamelCase_ ).images
        lowerCamelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        lowerCamelCase_ : Dict = np.array([0.5_2761, 0.5_9977, 0.4_9033, 0.4_9619, 0.5_4282, 0.5_0311, 0.4_7600, 0.4_0918, 0.4_5203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

    def __UpperCamelCase ( self : str ) -> str:
        """Euler discrete scheduler variant."""
        lowerCamelCase_ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCamelCase_ : List[str] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        lowerCamelCase_ : List[Any] = self.get_dummy_inputs()
        lowerCamelCase_ : List[str] = pipe(**UpperCamelCase_ ).images
        lowerCamelCase_ : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        lowerCamelCase_ : Optional[Any] = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

    def __UpperCamelCase ( self : Any ) -> Dict:
        """Euler-ancestral scheduler variant."""
        lowerCamelCase_ : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCamelCase_ : List[str] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        lowerCamelCase_ : Optional[Any] = self.get_dummy_inputs()
        lowerCamelCase_ : List[str] = pipe(**UpperCamelCase_ ).images
        lowerCamelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        lowerCamelCase_ : Optional[Any] = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

    def __UpperCamelCase ( self : str ) -> int:
        """DPM-Solver multistep scheduler variant."""
        lowerCamelCase_ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCamelCase_ : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        lowerCamelCase_ : List[Any] = self.get_dummy_inputs()
        lowerCamelCase_ : Any = pipe(**UpperCamelCase_ ).images
        lowerCamelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        lowerCamelCase_ : Any = np.array([0.6_5331, 0.5_8277, 0.4_8204, 0.5_6059, 0.5_3665, 0.5_6235, 0.5_0969, 0.4_0009, 0.4_6552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    # Nightly GPU integration tests: full-size img2img runs on CUDA through
    # onnxruntime, compared against reference pixel slices.
    # NOTE(review): this class rebinds — and thus shadows — the test class of
    # the same mangled name above; ``init_image``, ``pipe``, ``prompt``,
    # ``generator`` etc. are read as those names but bound to mangled locals.
    # Code kept byte-identical.

    @property
    def __UpperCamelCase ( self : Union[str, Any] ) -> str:
        """CUDA execution-provider tuple (with a 15GB arena limit)."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
        """onnxruntime session options with mem-pattern disabled (mangled local)."""
        lowerCamelCase_ : Any = ort.SessionOptions()
        lowerCamelCase_ : List[Any] = False
        return options

    def __UpperCamelCase ( self : Any ) -> List[str]:
        """img2img with the default (PNDM) scheduler on SD v1-4."""
        lowerCamelCase_ : str = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        lowerCamelCase_ : Optional[Any] = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        lowerCamelCase_ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        lowerCamelCase_ : List[str] = '''A fantasy landscape, trending on artstation'''
        lowerCamelCase_ : Union[str, Any] = np.random.RandomState(0 )
        lowerCamelCase_ : str = pipe(
            prompt=UpperCamelCase_ , image=UpperCamelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase_ , output_type='''np''' , )
        lowerCamelCase_ : str = output.images
        lowerCamelCase_ : Optional[Any] = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        lowerCamelCase_ : Optional[int] = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
        """img2img with the LMS discrete scheduler on SD v1-5."""
        lowerCamelCase_ : Any = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        lowerCamelCase_ : List[Any] = init_image.resize((768, 512) )
        lowerCamelCase_ : Tuple = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
        lowerCamelCase_ : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        lowerCamelCase_ : Tuple = '''A fantasy landscape, trending on artstation'''
        lowerCamelCase_ : int = np.random.RandomState(0 )
        lowerCamelCase_ : Optional[int] = pipe(
            prompt=UpperCamelCase_ , image=UpperCamelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCamelCase_ , output_type='''np''' , )
        lowerCamelCase_ : Optional[int] = output.images
        lowerCamelCase_ : int = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        lowerCamelCase_ : Tuple = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 501 |
'''simple docstring'''

# Fast RoFormer tokenizer module: stdlib, tokenizers-backend and project imports.
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
# Fix: all five module constants were bound to the single mangled name ``a_``
# (each assignment clobbering the last) while the tokenizer class below
# references the real names (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, PRETRAINED_INIT_CONFIGURATION);
# restore them.
logger = logging.get_logger(__name__)

# File names expected inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Hub URLs of the vocab file for each published RoFormer checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

# Maximum model input sizes (positional embeddings) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

# Default init kwargs per checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}

# Backward-compatible alias: ``a_`` previously ended up bound to the init config.
a_ = PRETRAINED_INIT_CONFIGURATION
class snake_case ( lowercase ):
    """Fast RoFormer tokenizer backed by HuggingFace *tokenizers*.

    NOTE(review): the base-class name ``lowercase`` is unresolved in this chunk
    (presumably PreTrainedTokenizerFast), and the five class attributes below
    are all bound to the same mangled name ``_lowerCamelCase`` — each
    assignment clobbers the previous one (upstream: vocab_files_names,
    pretrained_vocab_files_map, max_model_input_sizes,
    pretrained_init_configuration, slow_tokenizer_class).
    """
    _lowerCamelCase = VOCAB_FILES_NAMES
    _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
    _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
    _lowerCamelCase = RoFormerTokenizer
def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ):
"""simple docstring"""
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase" , UpperCamelCase ) != do_lower_case
or pre_tok_state.get("strip_accents" , UpperCamelCase ) != strip_accents
):
lowerCamelCase_ = getattr(UpperCamelCase , pre_tok_state.pop("type" ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = pre_tok_class(**UpperCamelCase )
lowerCamelCase_ = do_lower_case
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = BertPreTokenizer()
return state
def __setstate__( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = d
lowerCamelCase_ = self.__dict__["_tokenizer"].get_vocab()
lowerCamelCase_ = PreTokenizer.custom(JiebaPreTokenizer(UpperCamelCase ) )
def snake_case ( self , UpperCamelCase , UpperCamelCase=None ):
"""simple docstring"""
lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
lowerCamelCase_ = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
def snake_case ( self , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=False , **UpperCamelCase , ):
"""simple docstring"""
lowerCamelCase_ = BertPreTokenizer()
return super().save_pretrained(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase )
| 675 | 0 |
import logging
import os
from .state import PartialState
class SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ):
    """`logging.LoggerAdapter` that is aware of the distributed process state.

    By default a record is only emitted on the main process.  Any logging call
    accepts two extra keyword arguments:

    * ``main_process_only`` (bool, default ``True``) -- when ``False``, log on
      every process instead of just the main one.
    * ``in_order`` (bool, default ``False``) -- when ``True``, log on every
      process one after another, rank 0 first (each rank waits for the rest).
    """

    @staticmethod
    def _should_log(main_process_only):
        """Return True when this process is allowed to emit the record.

        Fix: the helper was defined under a mangled name that never matched
        the ``self._should_log(...)`` call below, making it unreachable.
        """
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def SCREAMING_SNAKE_CASE(self, level, msg, *args, **kwargs):
        """Delegate to the wrapped logger, filtered by process rank.

        Raises:
            RuntimeError: if the accelerate state has not been initialized
                (neither ``PartialState()`` nor ``Accelerator()`` was called).
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
        # Fix: the original popped these flags with an undefined name as the
        # default; the documented defaults are main-process-only, unordered.
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                # Emit on each rank in turn; every rank synchronizes after
                # each step so the output is ordered by process index.
                state = PartialState()
                for rank in range(state.num_processes):
                    if rank == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def snake_case_(name: str, log_level: "str | None" = None) -> "logging.LoggerAdapter":
    """Return a multi-process-aware logger adapter wrapping ``logging.getLogger(name)``.

    Fixes the mangled original, whose two parameters shared one name (a
    SyntaxError) and whose body referenced an undefined ``UpperCAmelCase_``.

    Args:
        name: Logger name, typically ``__name__``.
        log_level: Explicit level name; when omitted, falls back to the
            ``ACCELERATE_LOG_LEVEL`` environment variable (if set).

    Returns:
        The adapter class defined above, wrapping the named logger with no
        extra context.
    """
    if log_level is None:
        # Environment variable only applies when no explicit level was given.
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        # Apply the level to both this logger and the root logger.
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return SCREAMING_SNAKE_CASE(logger, {})
| 651 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case :
    """Test helper that builds tiny ConvNext-backbone UperNet configurations
    and dummy inputs for the unit tests below (transformers model-tester
    style).

    NOTE(review): this is a name-mangled copy -- all four parameterless
    methods share the name `snake_case`, so only the last definition is
    reachable, and several bodies reference helper names (`self.get_config`,
    `self.get_backbone_config`, `self.prepare_config_and_inputs`) that the
    mangling erased.  Comments describe the evident intent; confirm against
    the original module.
    """

    # NOTE(review): every parameter after `parent` is named `UpperCamelCase`
    # (duplicate argument names are a SyntaxError); the bodies below show the
    # intended names: batch_size, image_size, num_channels, num_stages,
    # hidden_sizes, depths, is_training, use_labels, intermediate_size,
    # hidden_act, type_sequence_label_size, initializer_range, out_features,
    # num_labels, scope.
    def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=32 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=[10, 20, 30, 40] , UpperCamelCase=[2, 2, 3, 2] , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=10 , UpperCamelCase=0.02 , UpperCamelCase=["stage2", "stage3", "stage4"] , UpperCamelCase=3 , UpperCamelCase=None , ):
        """Store the hyper-parameters used to build configs and dummy inputs."""
        lowerCamelCase_ = parent
        lowerCamelCase_ = batch_size
        lowerCamelCase_ = image_size
        lowerCamelCase_ = num_channels
        lowerCamelCase_ = num_stages
        lowerCamelCase_ = hidden_sizes
        lowerCamelCase_ = depths
        lowerCamelCase_ = is_training
        lowerCamelCase_ = use_labels
        lowerCamelCase_ = intermediate_size
        lowerCamelCase_ = hidden_act
        lowerCamelCase_ = type_sequence_label_size
        lowerCamelCase_ = initializer_range
        lowerCamelCase_ = out_features
        lowerCamelCase_ = num_labels
        lowerCamelCase_ = scope
        lowerCamelCase_ = num_stages

    def snake_case ( self ):
        """Build a random pixel-values tensor (and labels, when enabled) plus
        the model config."""
        lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase_ = None
        if self.use_labels:
            lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCamelCase_ = self.get_config()
        return config, pixel_values, labels

    def snake_case ( self ):
        """Build the tiny ConvNext backbone configuration."""
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )

    def snake_case ( self ):
        """Build the UperNet configuration on top of the ConvNext backbone."""
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=UpperCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=UpperCamelCase , loss_ignore_index=255 , num_labels=self.num_labels , )

    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
        """Run the segmentation model and assert the logits shape is
        (batch, num_labels, H, W)."""
        lowerCamelCase_ = UperNetForSemanticSegmentation(config=UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()
        lowerCamelCase_ = model(UpperCamelCase )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def snake_case ( self ):
        """Return (config, inputs_dict) as expected by the common test mixin."""
        lowerCamelCase_ = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase_
            ) ,(
                lowerCamelCase_
            ) ,(
                lowerCamelCase_
            ) ,
        ) = config_and_inputs
        lowerCamelCase_ = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class snake_case ( lowercase , lowercase , unittest.TestCase ):
    """Model-level test suite for `UperNetForSemanticSegmentation` using the
    common transformers ModelTester/PipelineTester mixins; checks that do not
    apply to UperNet (input embeddings, base model, tied weights, ...) are
    skipped.

    NOTE(review): this is a name-mangled copy -- every test method is named
    `snake_case` (only the final definition survives on the class) and every
    class flag is bound to the same `_lowerCamelCase` name.  Comments describe
    the evident intent of each method; confirm against the original module.
    """

    # Model classes / pipeline mapping under test, then (in the original) the
    # usual capability flags (pruning, resize-embeddings, head-masking, ...)
    # all set to False.
    _lowerCamelCase = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    _lowerCamelCase = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False

    def snake_case ( self ):
        """Set up the model tester and the config tester."""
        lowerCamelCase_ = UperNetModelTester(self )
        lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )

    def snake_case ( self ):
        """Run the full battery of common config serialization checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def snake_case ( self ):
        """Placeholder for create_and_test_config_common_properties (no-op)."""
        return

    def snake_case ( self ):
        """Check the forward signature starts with `pixel_values`."""
        lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ = model_class(UpperCamelCase )
            lowerCamelCase_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase_ = [*signature.parameters.keys()]
            lowerCamelCase_ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , UpperCamelCase )

    def snake_case ( self ):
        """Exercise the semantic-segmentation forward pass via the tester."""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase )

    @unittest.skip(reason="UperNet does not use inputs_embeds" )
    def snake_case ( self ):
        """Skipped: inputs_embeds is not supported by UperNet."""
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings" )
    def snake_case ( self ):
        """Skipped: no input/output embeddings on UperNet."""
        pass

    @unittest.skip(reason="UperNet does not have a base model" )
    def snake_case ( self ):
        """Skipped: UperNet has no standalone base model."""
        pass

    @unittest.skip(reason="UperNet does not have a base model" )
    def snake_case ( self ):
        """Skipped: UperNet has no standalone base model."""
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def snake_case ( self ):
        """Skipped: DataParallel is incompatible with `add_module` layers."""
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def snake_case ( self ):
        """Skipped pending a smaller common-test model."""
        pass

    def snake_case ( self ):
        """Check hidden-state outputs: one entry per stage (+1) and the
        expected spatial resolution on the first feature map."""
        def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
            lowerCamelCase_ = model_class(UpperCamelCase )
            model.to(UpperCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
            lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase_ = self.model_tester.num_stages
            self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ = True
            check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase_ = True
            check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )

    def snake_case ( self ):
        """Check that zero-init configs produce parameters whose mean rounds
        to exactly 0.0 or 1.0 (i.e. initialization hits the intended values)."""
        lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ = _config_zero_init(UpperCamelCase )
        lowerCamelCase_ = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            lowerCamelCase_ = model_class(config=UpperCamelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )

    @unittest.skip(reason="UperNet does not have tied weights" )
    def snake_case ( self ):
        """Skipped: no tied weights on UperNet."""
        pass

    @slow
    def snake_case ( self ):
        """Smoke-test loading the first pretrained checkpoint from the hub."""
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ = UperNetForSemanticSegmentation.from_pretrained(UpperCamelCase )
            self.assertIsNotNone(UpperCamelCase )
def __snake_case ( ):
    """Download the ADE20k fixture image from the hub and return it as an RGB
    PIL image.

    Fixes the mangled original, which opened an undefined name
    (``UpperCAmelCase_``) instead of the downloaded file path and returned an
    unbound ``image`` variable.
    """
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
    # Convert to RGB so downstream processors get a 3-channel image.
    image = Image.open(filepath ).convert("RGB" )
    return image
@require_torch
@require_vision
@slow
class snake_case ( unittest.TestCase ):
    """Slow integration tests: run two real UperNet checkpoints (Swin-tiny and
    ConvNext-tiny backbones) on the ADE20k fixture image and compare the
    top-left logits against recorded reference values.

    NOTE(review): both test methods are named `snake_case` in this mangled
    copy, so only the second definition is reachable on the class.
    """

    def snake_case ( self ):
        """openmmlab/upernet-swin-tiny: check output shape and logits slice."""
        lowerCamelCase_ = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
        lowerCamelCase_ = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(UpperCamelCase )
        lowerCamelCase_ = prepare_img()
        lowerCamelCase_ = processor(images=UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase )
        with torch.no_grad():
            lowerCamelCase_ = model(**UpperCamelCase )
        # Logits come back at the processor's 512x512 resolution.
        lowerCamelCase_ = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase )
        lowerCamelCase_ = torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase , atol=1e-4 ) )

    def snake_case ( self ):
        """openmmlab/upernet-convnext-tiny: check output shape and logits slice."""
        lowerCamelCase_ = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
        lowerCamelCase_ = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(UpperCamelCase )
        lowerCamelCase_ = prepare_img()
        lowerCamelCase_ = processor(images=UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase )
        with torch.no_grad():
            lowerCamelCase_ = model(**UpperCamelCase )
        lowerCamelCase_ = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase )
        lowerCamelCase_ = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
| 675 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
A_ = logging.get_logger(__name__)
A_ = {
"""Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _snake_case ( _a ):
    """Configuration class for a Marian MT encoder-decoder model.

    Restores the real parameter and attribute names: the mangled original
    reused a single name for every ``__init__`` parameter (a SyntaxError) and
    bound all three class attributes to the same name, while its body
    referenced the conventional hyper-parameter names below.

    Defaults (vocab 58101, 12+12 layers, d_model 1024, heads 16) match the
    values present in the original signature.
    """

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58_101,
        decoder_vocab_size=None,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58_100,
        scale_embedding=False,
        pad_token_id=58_100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        """Store the hyper-parameters and forward the special-token ids and
        encoder/decoder flags to the base config."""
        self.vocab_size = vocab_size
        # `None` means the decoder shares the encoder vocabulary.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # NOTE(review): the original assigned `encoder_layers` a second time;
        # per the Marian config contract this is `num_hidden_layers`.
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class _snake_case ( _a ):
    """ONNX export configuration for Marian (seq2seq-with-past style).

    Declares the dynamic input/output axes for the supported tasks and builds
    dummy inputs -- optionally including zero-filled ``past_key_values`` --
    for tracing.

    NOTE(review): this is a name-mangled copy.  Both properties and all four
    plain methods are named `__UpperCamelCase` (so earlier definitions are
    shadowed), several signatures repeat the parameter name
    `SCREAMING_SNAKE_CASE__` (a SyntaxError), and many bodies reference names
    (`common_inputs`, `seq_length`, `batch`, ...) that the mangling erased.
    Comments describe the evident intent; confirm against the original module.
    """
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def __UpperCamelCase ( self : int ):
        # Dynamic-axis spec for the model inputs, depending on the task and on
        # whether past key/values (the decoder cache) are used.
        if self.task in ["default", "seq2seq-lm"]:
            SCREAMING_SNAKE_CASE:Tuple = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                # With a cache, the decoder consumes one new token per step.
                SCREAMING_SNAKE_CASE:int = {0: "batch"}
                SCREAMING_SNAKE_CASE:List[str] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                SCREAMING_SNAKE_CASE:str = {0: "batch", 1: "decoder_sequence"}
                SCREAMING_SNAKE_CASE:Optional[int] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ ,direction="inputs" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            SCREAMING_SNAKE_CASE:Union[str, Any] = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[int] = self.num_layers
                for i in range(SCREAMING_SNAKE_CASE__ ):
                    # One (key, value) axis spec per decoder layer.
                    SCREAMING_SNAKE_CASE:Tuple = {0: "batch", 2: "past_sequence + sequence"}
                    SCREAMING_SNAKE_CASE:Optional[Any] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            SCREAMING_SNAKE_CASE:Dict = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def __UpperCamelCase ( self : int ):
        # Dynamic-axis spec for the model outputs; past key/values get
        # per-layer dynamic axes when the cache is enabled.
        if self.task in ["default", "seq2seq-lm"]:
            SCREAMING_SNAKE_CASE:Any = super().outputs
        else:
            SCREAMING_SNAKE_CASE:List[str] = super(SCREAMING_SNAKE_CASE__ ,self ).outputs
            if self.use_past:
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Dict = self.num_layers
                for i in range(SCREAMING_SNAKE_CASE__ ):
                    SCREAMING_SNAKE_CASE:List[str] = {0: "batch", 2: "past_sequence + sequence"}
                    SCREAMING_SNAKE_CASE:Optional[int] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = -1 ,SCREAMING_SNAKE_CASE__ : Dict = -1 ,SCREAMING_SNAKE_CASE__ : Dict = False ,SCREAMING_SNAKE_CASE__ : str = None ,):
        # Dummy inputs for the default / seq2seq-lm tasks: encoder inputs,
        # decoder inputs (length 1 when using past), and zero-filled
        # past_key_values tensors for every layer.
        SCREAMING_SNAKE_CASE:Optional[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
            SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
        # Generate decoder inputs
        SCREAMING_SNAKE_CASE:List[Any] = seq_length if not self.use_past else 1
        SCREAMING_SNAKE_CASE:List[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
            SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
        # Prefix the decoder tensors with "decoder_" and merge both dicts.
        SCREAMING_SNAKE_CASE:Union[str, Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        SCREAMING_SNAKE_CASE:Optional[Any] = dict(**SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Dict = common_inputs["input_ids"].shape
            SCREAMING_SNAKE_CASE:int = common_inputs["decoder_input_ids"].shape[1]
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[int] = self.num_attention_heads
            # Per-layer cache tensor shapes for the encoder and decoder sides.
            SCREAMING_SNAKE_CASE:List[Any] = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            SCREAMING_SNAKE_CASE:Dict = decoder_seq_length + 3
            SCREAMING_SNAKE_CASE:Any = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            # Extend the decoder attention mask to cover the past positions.
            SCREAMING_SNAKE_CASE:List[str] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )] ,dim=1 )
            SCREAMING_SNAKE_CASE:Dict = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:str = self.num_layers
            SCREAMING_SNAKE_CASE:Optional[int] = min(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
            SCREAMING_SNAKE_CASE:Dict = max(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) - min_num_layers
            SCREAMING_SNAKE_CASE:Optional[int] = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            # Layers shared by both sides get 4 zero tensors (enc K/V + dec K/V).
            for _ in range(SCREAMING_SNAKE_CASE__ ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(SCREAMING_SNAKE_CASE__ ),
                        torch.zeros(SCREAMING_SNAKE_CASE__ ),
                        torch.zeros(SCREAMING_SNAKE_CASE__ ),
                        torch.zeros(SCREAMING_SNAKE_CASE__ ),
                    ) )
            # TODO: test this.
            SCREAMING_SNAKE_CASE:List[str] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            # Remaining layers (on the deeper side only) get a (key, value) pair.
            for _ in range(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
                common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) )
        return common_inputs
    def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Any = -1 ,SCREAMING_SNAKE_CASE__ : Optional[Any] = -1 ,SCREAMING_SNAKE_CASE__ : str = False ,SCREAMING_SNAKE_CASE__ : int = None ,):
        # Dummy inputs for the causal-lm task: decoder-only inputs plus
        # zero-filled (key, value) cache tensors for each layer.
        SCREAMING_SNAKE_CASE:List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
            SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[Any] = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            SCREAMING_SNAKE_CASE:Any = seqlen + 2
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:int = self.num_layers
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:List[Any] = self.num_attention_heads
            SCREAMING_SNAKE_CASE:Dict = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            # Extend the attention mask (same dtype) to cover past positions.
            SCREAMING_SNAKE_CASE:str = common_inputs["attention_mask"].dtype
            SCREAMING_SNAKE_CASE:Any = torch.cat(
                [common_inputs["attention_mask"], torch.ones(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,dtype=SCREAMING_SNAKE_CASE__ )] ,dim=1 )
            SCREAMING_SNAKE_CASE:Any = [
                (torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) for _ in range(SCREAMING_SNAKE_CASE__ )
            ]
        return common_inputs
    def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any = -1 ,SCREAMING_SNAKE_CASE__ : Any = -1 ,SCREAMING_SNAKE_CASE__ : Dict = False ,SCREAMING_SNAKE_CASE__ : Optional[Any] = None ,):
        # Build tokenizer-based dummy text inputs with effective batch and
        # sequence dimensions (fixed sizes substituted for dynamic axes).
        SCREAMING_SNAKE_CASE:Any = compute_effective_axis_dimension(
            SCREAMING_SNAKE_CASE__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        SCREAMING_SNAKE_CASE:List[Any] = tokenizer.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ )
        SCREAMING_SNAKE_CASE:Optional[Any] = compute_effective_axis_dimension(
            SCREAMING_SNAKE_CASE__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=SCREAMING_SNAKE_CASE__ )
        # Generate dummy inputs according to compute batch and sequence
        SCREAMING_SNAKE_CASE:Any = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
        SCREAMING_SNAKE_CASE:Optional[Any] = dict(tokenizer(SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ) )
        return common_inputs
    def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict = -1 ,SCREAMING_SNAKE_CASE__ : str = -1 ,SCREAMING_SNAKE_CASE__ : str = False ,SCREAMING_SNAKE_CASE__ : List[str] = None ,):
        # Dispatch dummy-input generation on the configured task.
        if self.task in ["default", "seq2seq-lm"]:
            SCREAMING_SNAKE_CASE:Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,seq_length=SCREAMING_SNAKE_CASE__ ,is_pair=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__ )
        else:
            SCREAMING_SNAKE_CASE:Optional[Any] = self._generate_dummy_inputs_for_causal_lm(
                SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,seq_length=SCREAMING_SNAKE_CASE__ ,is_pair=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__ )
        return common_inputs
    def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ):
        # Flatten past_key_values entries according to the task variant.
        if self.task in ["default", "seq2seq-lm"]:
            SCREAMING_SNAKE_CASE:str = super()._flatten_past_key_values_(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
        else:
            SCREAMING_SNAKE_CASE:Union[str, Any] = super(SCREAMING_SNAKE_CASE__ ,self )._flatten_past_key_values_(
                SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
    @property
    def __UpperCamelCase ( self : List[str] ):
        # Absolute tolerance used when validating exported-model outputs.
        return 1e-4
| 143 |
"""Initialize and save a fresh GPT-2-style CodeParrot model.

Fixes the mangled original, which assigned every intermediate object to the
same module-level name (``a_``) while the statements below referenced the
real names (``parser``, ``args``, ``tokenizer``, ``config_kwargs``,
``config``, ``model``), producing a NameError chain.
"""
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser

# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 675 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> Optional[int]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance == 0:
return {"resistance": sqrt(pow(UpperCAmelCase_ , 2 ) - pow(UpperCAmelCase_ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(UpperCAmelCase_ , 2 ) - pow(UpperCAmelCase_ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(UpperCAmelCase_ , 2 ) + pow(UpperCAmelCase_ , 2 ) )}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 689 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
a_ : List[Any] = get_logger(__name__)
class snake_case :
"""simple docstring"""
_lowerCamelCase = "dummy_data"
_lowerCamelCase = "datasets"
_lowerCamelCase = False
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = True , UpperCamelCase = None , ):
"""simple docstring"""
lowerCamelCase_ = 0
lowerCamelCase_ = dataset_name
lowerCamelCase_ = cache_dir
lowerCamelCase_ = use_local_dummy_data
lowerCamelCase_ = config
# download_callbacks take a single url as input
lowerCamelCase_ = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowerCamelCase_ = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowerCamelCase_ = str(UpperCamelCase )
# to be downloaded
lowerCamelCase_ = None
lowerCamelCase_ = None
@property
def snake_case ( self ):
"""simple docstring"""
if self._dummy_file is None:
lowerCamelCase_ = self.download_dummy_data()
return self._dummy_file
@property
def snake_case ( self ):
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def snake_case ( self ):
"""simple docstring"""
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowerCamelCase_ = cached_path(
UpperCamelCase , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase , force_extract=UpperCamelCase )
return os.path.join(UpperCamelCase , self.dummy_file_name )
@property
def snake_case ( self ):
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def snake_case ( self ):
"""simple docstring"""
if self._bucket_url is None:
lowerCamelCase_ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def snake_case ( self ):
"""simple docstring"""
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def snake_case ( self , UpperCamelCase , *UpperCamelCase ):
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowerCamelCase_ = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowerCamelCase_ = self.dummy_file_name
# special case when data_url is a dict
if isinstance(UpperCamelCase , UpperCamelCase ):
return self.create_dummy_data_dict(UpperCamelCase , UpperCamelCase )
elif isinstance(UpperCamelCase , (list, tuple) ):
return self.create_dummy_data_list(UpperCamelCase , UpperCamelCase )
else:
return self.create_dummy_data_single(UpperCamelCase , UpperCamelCase )
def snake_case ( self , UpperCamelCase , *UpperCamelCase ):
"""simple docstring"""
return self.download_and_extract(UpperCamelCase )
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
return self.download_and_extract(UpperCamelCase )
def snake_case ( self , UpperCamelCase , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return path
def snake_case ( self ):
"""simple docstring"""
return {}
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(UpperCamelCase , UpperCamelCase ):
for single_url in single_urls:
download_callback(UpperCamelCase )
else:
lowerCamelCase_ = single_urls
download_callback(UpperCamelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = [os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) ) for x in single_urls]
else:
lowerCamelCase_ = single_urls
lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) )
lowerCamelCase_ = value
# make sure that values are unique
if all(isinstance(UpperCamelCase , UpperCamelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowerCamelCase_ = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowerCamelCase_ = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , UpperCamelCase ) ) for url in data_url )
lowerCamelCase_ = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowerCamelCase_ = [data_url[0]] * len(UpperCamelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(UpperCamelCase )
return dummy_data_list
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(UpperCamelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
def _iter_archive_members(UpperCamelCase ):
# this preserves the order of the members inside the ZIP archive
lowerCamelCase_ = Path(self.dummy_file ).parent
lowerCamelCase_ = path.relative_to(UpperCamelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowerCamelCase_ = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(UpperCamelCase )
lowerCamelCase_ = Path(UpperCamelCase )
lowerCamelCase_ = _iter_archive_members(UpperCamelCase ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(UpperCamelCase ).as_posix(), file_path.open("rb" )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if not isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = [paths]
for path in paths:
if os.path.isfile(UpperCamelCase ):
if os.path.basename(UpperCamelCase ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(UpperCamelCase ):
if os.path.basename(UpperCamelCase ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(UpperCamelCase ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(UpperCamelCase , UpperCamelCase )
| 675 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; the conversion function below reports progress via `logger.info`.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight"))
    # ca_qpos_proj only exists in decoder layer 0 -> handled in the extend() list below
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias"))
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias"))
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move `state_dict[old]` to `state_dict[new]` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with torchvision-backbone keys mapped to the HF naming scheme."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each encoder layer's fused in-projection into separate q/k/v entries (in place).

    PyTorch's MultiHeadAttention stores q, k and v stacked in a single 768-row
    matrix/bias (3 x 256 hidden size); HF stores them as three projections.
    """
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """Download the standard COCO sample image (two cats) used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original ConditionalDETR weights into the HF model structure.

    Args:
        model_name: torch-hub name of the checkpoint (e.g. "conditional_detr_resnet50").
        pytorch_dump_folder_path: output folder for the converted model + image processor.
    """
    # load default config and adapt it to the checkpoint variant
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # NOTE(review): the assignment targets in this loop were lost in this file;
                # they are reconstructed from the analogous DETR conversion script -- confirm upstream.
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion on the sample image
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 47 |
'''simple docstring'''
import os
def min_path_sum(grid):
    """Return the minimal path sum from top-left to bottom-right, moving only right or down.

    Works for any rectangular grid (the original assumed a square matrix).
    """
    rows = len(grid)
    cols = len(grid[0])
    dp = [[0] * cols for _ in range(rows)]
    dp[0][0] = grid[0][0]
    # first row: can only come from the left
    for j in range(1, cols):
        dp[0][j] = grid[0][j] + dp[0][j - 1]
    # first column: can only come from above
    for i in range(1, rows):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, rows):
        for j in range(1, cols):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]


def solution(filename: str = "matrix.txt") -> int:
    """Project Euler 81: minimal path sum through the matrix stored next to this script.

    The matrix file is resolved relative to this source file (not the filename
    argument, which the previous version wrongly passed to `os.path.dirname`).
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()
    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    return min_path_sum(grid)
if __name__ == "__main__":
    # self-documenting f-string (Python 3.8+): prints `solution() = <answer>`
    print(f'''{solution() = }''')
| 675 | 0 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_UpperCamelCase = get_logger(__name__)
_UpperCamelCase = R"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class __lowercase :
    """Abstract base class for Flax logits processors applied during generation.

    NOTE(review): `A_` in the decorator is undefined at class-creation time; it
    presumably should be the module-level inputs docstring constant -- confirm.
    """

    @add_start_docstrings(A_ )
    def __call__( self , A_ , A_ ) ->str:
        """Process one step's logits; subclasses must override."""
        raise NotImplementedError(
            f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class __lowercase :
    """Abstract base class for Flax logits warpers (distribution-reshaping processors).

    NOTE(review): `A_` in the decorator is undefined at class-creation time; it
    presumably should be the module-level inputs docstring constant -- confirm.
    """

    @add_start_docstrings(A_ )
    def __call__( self , A_ , A_ ) ->Any:
        """Warp one step's logits; subclasses must override."""
        raise NotImplementedError(
            f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class __lowercase (_UpperCAmelCase ):
    """List of logits processors applied in sequence; forwards extra kwargs to
    processors whose `__call__` takes more than three arguments.

    NOTE(review): local names are machine-mangled -- every assignment binds
    `__lowerCAmelCase`, so later references (`function_args`, `scores`) are unbound;
    the base-class name `_UpperCAmelCase` is also undefined in this module. Verify
    against the upstream implementation.
    """

    @add_start_docstrings(A_ )
    def __call__( self , A_ , A_ , A_ , **A_ ) ->Any:
        """Run every processor in order, threading the scores through."""
        for processor in self:
            __lowerCAmelCase : List[Any] = inspect.signature(processor.__call__ ).parameters
            if len(A_ ) > 3:
                # processors with extra parameters must receive them via kwargs
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        f"""Make sure that all the required parameters: {list(function_args.keys() )} for """
                        f"""{processor.__class__} are passed to the logits processor.""" )
                __lowerCAmelCase : List[Any] = processor(A_ , A_ , A_ , **A_ )
            else:
                __lowerCAmelCase : str = processor(A_ , A_ , A_ )
        return scores
class __lowercase (_UpperCAmelCase ):
    """Temperature warper: divides the scores by a strictly positive temperature.

    NOTE(review): the body references `temperature` although the parameter is the
    mangled `A_`, and `isinstance(A_, A_)` compares the argument to itself --
    verify against the upstream implementation.
    """

    def __init__( self , A_ ) ->str:
        # A_: the temperature (strictly positive float)
        if not isinstance(A_ , A_ ) or not (temperature > 0):
            raise ValueError(f"""`temperature` has to be a strictly positive float, but is {temperature}""" )
        __lowerCAmelCase : int = temperature
    def __call__( self , A_ , A_ , A_ ) ->Dict:
        """Rescale the distribution's sharpness by the stored temperature."""
        __lowerCAmelCase : Any = scores / self.temperature
        return scores
class __lowercase (_UpperCAmelCase ):
    """Nucleus (top-p) filtering: keeps the smallest token set whose cumulative
    probability exceeds `top_p`; everything else gets `filter_value`.

    NOTE(review): tuple unpackings bind both elements to the same mangled name, so
    later references (`score_mask`, `next_scores`, ...) are unbound -- verify
    against the upstream implementation.
    """

    def __init__( self , A_ , A_ = -float('''Inf''' ) , A_ = 1 ) ->List[Any]:
        # A_ params (in order): top_p in (0, 1], filter_value, min_tokens_to_keep >= 1
        if not isinstance(A_ , A_ ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
        if not isinstance(A_ , A_ ) or (min_tokens_to_keep < 1):
            raise ValueError(f"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
        __lowerCAmelCase : Tuple = top_p
        __lowerCAmelCase : Tuple = filter_value
        __lowerCAmelCase : List[Any] = min_tokens_to_keep
    def __call__( self , A_ , A_ , A_ ) ->Union[str, Any]:
        """Mask tokens outside the nucleus, then restore the original token order."""
        __lowerCAmelCase, __lowerCAmelCase : List[Any] = lax.top_k(A_ , scores.shape[-1] )
        __lowerCAmelCase : str = jnp.full_like(A_ , self.filter_value )
        __lowerCAmelCase : Tuple = jax.nn.softmax(A_ , axis=-1 ).cumsum(axis=-1 )
        __lowerCAmelCase : Dict = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        __lowerCAmelCase : Dict = jnp.roll(A_ , 1 )
        score_mask |= score_mask.at[:, 0].set(A_ )
        # min tokens to keep
        __lowerCAmelCase : int = score_mask.at[:, : self.min_tokens_to_keep].set(A_ )
        __lowerCAmelCase : Dict = jnp.where(A_ , A_ , A_ )
        __lowerCAmelCase : Optional[int] = jax.lax.sort_key_val(A_ , A_ )[-1]
        return next_scores
class __lowercase (_UpperCAmelCase ):
    """Top-k filtering: keeps only the k highest-scoring tokens per row, scattering
    them back into a `filter_value`-filled matrix.

    NOTE(review): local names are machine-mangled; several later references
    (`topk_scores`, `topk_indices`, `next_scores_flat`) are unbound -- verify
    against the upstream implementation.
    """

    def __init__( self , A_ , A_ = -float('''Inf''' ) , A_ = 1 ) ->Union[str, Any]:
        # A_ params (in order): top_k > 0, filter_value, min_tokens_to_keep
        if not isinstance(A_ , A_ ) or top_k <= 0:
            raise ValueError(f"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
        __lowerCAmelCase : str = max(A_ , A_ )
        __lowerCAmelCase : Dict = filter_value
    def __call__( self , A_ , A_ , A_ ) ->Optional[int]:
        """Keep the top-k scores per row; everything else becomes the filter value."""
        __lowerCAmelCase, __lowerCAmelCase : Dict = scores.shape
        __lowerCAmelCase : Dict = jnp.full(batch_size * vocab_size , self.filter_value )
        __lowerCAmelCase : int = min(self.top_k , scores.shape[-1] )  # Safety check
        __lowerCAmelCase, __lowerCAmelCase : str = lax.top_k(A_ , A_ )
        # flatten row offsets so the scatter below can work on a 1-D buffer
        __lowerCAmelCase : Optional[Any] = jnp.broadcast_to((jnp.arange(A_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
        __lowerCAmelCase : str = topk_scores.flatten()
        __lowerCAmelCase : Tuple = topk_indices.flatten() + shift
        __lowerCAmelCase : Union[str, Any] = next_scores_flat.at[topk_indices_flat].set(A_ )
        __lowerCAmelCase : List[Any] = next_scores_flat.reshape(A_ , A_ )
        return next_scores
class __lowercase (_UpperCAmelCase ):
    """Forces the BOS token to be the first generated token (when cur_len == 1).

    NOTE(review): `__init__` references `bos_token_id` although the parameter is
    the mangled `A_` -- verify against the upstream implementation.
    """

    def __init__( self , A_ ) ->List[str]:
        # A_: id of the BOS token to force
        __lowerCAmelCase : Optional[Any] = bos_token_id
    def __call__( self , A_ , A_ , A_ ) ->Dict:
        """At the first step, allow only the BOS token (all others -inf)."""
        __lowerCAmelCase : Optional[Any] = jnp.full(scores.shape , -float('''inf''' ) )
        __lowerCAmelCase : List[Any] = 1 - jnp.bool_(cur_len - 1 )
        __lowerCAmelCase : int = jnp.where(A_ , new_scores.at[:, self.bos_token_id].set(0 ) , A_ )
        return scores
class __lowercase (_UpperCAmelCase ):
    """Forces the EOS token once generation reaches `max_length` - 1.

    NOTE(review): `__init__` references `max_length`/`eos_token_id` although the
    parameters are the mangled `A_` -- verify against the upstream implementation.
    """

    def __init__( self , A_ , A_ ) ->Union[str, Any]:
        # A_ params (in order): max_length, eos_token_id
        __lowerCAmelCase : Union[str, Any] = max_length
        __lowerCAmelCase : Optional[Any] = eos_token_id
    def __call__( self , A_ , A_ , A_ ) ->Tuple:
        """At the last position, allow only the EOS token (all others -inf)."""
        __lowerCAmelCase : str = jnp.full(scores.shape , -float('''inf''' ) )
        __lowerCAmelCase : int = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        __lowerCAmelCase : Optional[int] = jnp.where(A_ , new_scores.at[:, self.eos_token_id].set(0 ) , A_ )
        return scores
class __lowercase (_UpperCAmelCase ):
    """Enforces a minimum generation length by masking the EOS token until
    `min_length` tokens have been produced.

    NOTE(review): validation references `min_length`/`eos_token_id` although the
    parameters are the mangled `A_` -- verify against the upstream implementation.
    """

    def __init__( self , A_ , A_ ) ->Dict:
        # A_ params (in order): min_length >= 0, eos_token_id >= 0
        if not isinstance(A_ , A_ ) or min_length < 0:
            raise ValueError(f"""`min_length` has to be a positive integer, but is {min_length}""" )
        if not isinstance(A_ , A_ ) or eos_token_id < 0:
            raise ValueError(f"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
        __lowerCAmelCase : Dict = min_length
        __lowerCAmelCase : Optional[Any] = eos_token_id
    def __call__( self , A_ , A_ , A_ ) ->Optional[int]:
        """Set the EOS score to -inf while cur_len < min_length."""
        # `apply_penalty` is 1 exactly while cur_len < min_length (clip keeps it in {0, 1})
        __lowerCAmelCase : Optional[int] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
        __lowerCAmelCase : Any = jnp.where(A_ , scores.at[:, self.eos_token_id].set(-float('''inf''' ) ) , A_ )
        return scores
class __lowercase (_UpperCAmelCase ):
    """Suppresses a fixed token set while generation is still at `begin_index`.

    NOTE(review): `__init__` references `begin_index` although the parameter is the
    mangled `A_` -- verify against the upstream implementation.
    """

    def __init__( self , A_ , A_ ) ->Tuple:
        # A_ params (in order): begin_suppress_tokens (iterable of ids), begin_index
        __lowerCAmelCase : str = list(A_ )
        __lowerCAmelCase : Union[str, Any] = begin_index
    def __call__( self , A_ , A_ , A_ ) ->int:
        """Mask the configured tokens exactly when cur_len == begin_index."""
        __lowerCAmelCase : List[Any] = 1 - jnp.bool_(cur_len - self.begin_index )
        __lowerCAmelCase : Optional[int] = jnp.where(A_ , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''' ) ) , A_ )
        return scores
class __lowercase (_UpperCAmelCase ):
    """Unconditionally suppresses a fixed set of token ids at every step."""

    def __init__( self , A_ ) ->Any:
        # A_: iterable of token ids to suppress
        __lowerCAmelCase : str = list(A_ )
    def __call__( self , A_ , A_ , A_ ) ->Dict:
        """Set the suppressed tokens' scores to -inf."""
        __lowerCAmelCase : List[Any] = scores.at[..., self.suppress_tokens].set(-float('''inf''' ) )
        return scores
class __lowercase (_UpperCAmelCase ):
    """Forces specific tokens at specific generation indices, using a dense int
    lookup array for XLA compatibility (-1 marks "no forced token").

    NOTE(review): `__init__` references `force_token_map`/`force_token_array`
    although locals are mangled, and `__call__` returns `scores` rather than the
    `lax.cond` result -- verify against the upstream implementation.
    """

    def __init__( self , A_ ) ->str:
        # A_: mapping {generation_index: forced_token_id}
        __lowerCAmelCase : Any = dict(A_ )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        __lowerCAmelCase : List[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                __lowerCAmelCase : Dict = force_token_array.at[index].set(A_ )
        __lowerCAmelCase : Optional[Any] = jnp.intaa(A_ )
    def __call__( self , A_ , A_ , A_ ) ->Any:
        """At indices with a forced token, replace the distribution by that token."""
        def _force_token(A_ ):
            # build a -inf row and put probability mass (score 0) only on the forced token
            __lowerCAmelCase : str = scores.shape[0]
            __lowerCAmelCase : Dict = self.force_token_array[generation_idx]
            __lowerCAmelCase : Optional[int] = jnp.ones_like(A_ , dtype=scores.dtype ) * -float('''inf''' )
            __lowerCAmelCase : int = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            __lowerCAmelCase : Dict = lax.dynamic_update_slice(A_ , A_ , (0, current_token) )
            return new_scores
        __lowerCAmelCase : Dict = lax.cond(
            cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
                self.force_token_array[cur_len] >= 0 , lambda: _force_token(A_ ) , lambda: scores , ) , )
        return scores
class __lowercase (_UpperCAmelCase ):
    """Whisper timestamp rules: forbids the <|notimestamps|> token, enforces
    timestamp pairing near the start of generation, caps the first timestamp, and
    samples a timestamp when the total timestamp probability mass dominates every
    text token.

    NOTE(review): parameters are mangled to `A_` while the bodies reference the
    real names (`generate_config`, `model_config`, `decoder_input_length`,
    `input_ids_k`, `scores_k`, ...), and many assignments bind `__lowerCAmelCase`
    instead of the referenced locals -- verify against the upstream implementation.
    """

    def __init__( self , A_ , A_ , A_ ) ->List[str]:
        # A_ params (in order, inferred from usage): generate_config, model_config,
        # decoder_input_length -- TODO confirm
        __lowerCAmelCase : Optional[Any] = generate_config.eos_token_id
        __lowerCAmelCase : Union[str, Any] = generate_config.no_timestamps_token_id
        __lowerCAmelCase : List[str] = generate_config.no_timestamps_token_id + 1
        __lowerCAmelCase : Tuple = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(A_ , '''max_initial_timestamp_index''' ):
            __lowerCAmelCase : Any = generate_config.max_initial_timestamp_index
        else:
            __lowerCAmelCase : Optional[Any] = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            __lowerCAmelCase : str = model_config.vocab_size
    def __call__( self , A_ , A_ , A_ ) ->List[str]:
        """Apply the timestamp constraints to one step's scores."""
        # suppress <|notimestamps|> which is handled by without_timestamps
        __lowerCAmelCase : str = scores.at[:, self.no_timestamps_token_id].set(-float('''inf''' ) )
        def handle_pairs(A_ , A_ ):
            # timestamps must come in pairs (except at the very beginning)
            __lowerCAmelCase : int = jnp.where((cur_len - self.begin_index) >= 1 , A_ , A_ )
            __lowerCAmelCase : Optional[Any] = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , A_ , )
            __lowerCAmelCase : Dict = jnp.where((cur_len - self.begin_index) < 2 , A_ , A_ )
            __lowerCAmelCase : List[Any] = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin , A_ , A_ , )
            return jnp.where(
                A_ , jnp.where(
                    penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('''inf''' ) ) , scores_k.at[: self.eos_token_id].set(-float('''inf''' ) ) , ) , A_ , )
        __lowerCAmelCase : Dict = jax.vmap(A_ )(A_ , A_ )
        __lowerCAmelCase : str = jnp.where(cur_len == self.begin_index , A_ , A_ )
        __lowerCAmelCase : List[str] = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , A_ , )
        # the first generated timestamp may not exceed this index
        __lowerCAmelCase : Dict = self.timestamp_begin + self.max_initial_timestamp_index
        __lowerCAmelCase : Dict = jnp.where(
            A_ , scores.at[:, last_allowed + 1 :].set(-float('''inf''' ) ) , A_ , )
        # if sum of probability over timestamps is above any other token, sample timestamp
        __lowerCAmelCase : Union[str, Any] = jax.nn.log_softmax(A_ , axis=-1 )
        def handle_cumulative_probs(A_ , A_ ):
            __lowerCAmelCase : int = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            __lowerCAmelCase : Optional[Any] = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('''inf''' ) ) , A_ , )
        __lowerCAmelCase : Optional[int] = jax.vmap(A_ )(A_ , A_ )
        return scores
| 492 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class snake_case ( unittest.TestCase ):
    """Runs the `accelerate.test_utils` metrics script under the various launch modes.

    The previous version named every method `snake_case`, so they shadowed each
    other and were never discovered by unittest; restored `setUp` + `test_*` names.
    """

    def setUp(self):
        # Resolve the path of the metrics script shipped inside accelerate.test_utils.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f'''Found {torch.cuda.device_count()} devices.''')
        # `UpperCamelCase` in the previous version was undefined -- the command list is what torchrun needs
        cmd = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 675 | 0 |
from torch import nn
class __lowerCAmelCase(nn.Module):
    """Classification head: a single linear layer mapping embeddings to class logits.

    Fixes: the original ``__init__`` declared two parameters with the same name
    (a SyntaxError), wrote all attributes to a local instead of ``self``, and
    named the inference method something other than ``forward`` so calling the
    module (``module(x)``) would not work.
    """

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        """Project ``hidden_state`` of shape (..., embed_size) to (..., class_size)."""
        logits = self.mlp(hidden_state)
        return logits
| 291 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
# Fixture texts for the ROUGE tests below. The original assigned both lists to
# the same name `a_` (the second overwrote the first) while the test functions
# read `PRED` and `TGT`.
PRED = [
    "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
    " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
    " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]

TGT = [
    "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
    " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]
def test_disaggregated_scores_are_deterministic():
    """Disaggregated rouge2 must not depend on which other keys were requested."""
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    """Sentence-per-line separation should improve rougeLsum on multi-sentence text."""
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_first_rouge_keys():
    """rouge1/rouge2/rougeL ignore newline separation entirely."""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    """For one-sentence inputs, newline separation must not change any score."""
    hyp = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    ref = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(hyp, ref, newline_sep=True) == calculate_rouge(hyp, ref, newline_sep=False)
def test_pegasus_newline():
    """Pegasus-style <n> outputs: default newline handling should beat newline_sep=False."""
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    """calculate_rouge_path returns a dict when aggregated, defaultdict otherwise."""
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 675 | 0 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Matches English articles; used by normalize_answer below. OPTS is populated
# from the CLI in the __main__ guard. The original bound both to the same
# throwaway name while the rest of the file reads ARTICLES_REGEX and OPTS.
ARTICLES_REGEX = re.compile(R"""\b(a|an|the)\b""", re.UNICODE)
OPTS = None
def parse_args():
    """Build and parse CLI options for the official SQuAD 2.0 evaluation script."""
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
    parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
    parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
    parser.add_argument(
        '--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).')
    parser.add_argument(
        '--na-prob-file', '-n', metavar='na_prob.json', help='Model estimates of probability of no answer.')
    parser.add_argument(
        '--na-prob-thresh', '-t', type=float, default=1.0,
        help='Predict \"\" if no-answer probability exceeds this (default = 1.0).')
    parser.add_argument(
        '--out-image-dir', '-p', metavar='out_images', default=None, help='Save precision-recall curves to directory.')
    parser.add_argument('--verbose', '-v', action='store_true')
    # With no arguments, print usage instead of an argparse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    """Map each question id to True if it has at least one gold answer text."""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa['answers']['text'])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(' ', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    """Whitespace tokens of the normalized answer; empty/None input yields []."""
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    """1 if the normalized answers match exactly, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    """Token-level F1 between a gold and a predicted answer."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores(dataset, preds):
    """Per-question exact-match and F1 scores (max over all gold answers)."""
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['id']
                gold_answers = [t for t in qa['answers']['text'] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['']
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Zero out (or credit) scores for questions the model predicts as unanswerable."""
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            # Predicting no-answer scores 1 only if the question truly has no answer.
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into {'exact', 'f1', 'total'} percentages.

    When qid_list is given, only those question ids are aggregated.
    """
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores.values()) / total),
                ('f1', 100.0 * sum(fa_scores.values()) / total),
                ('total', total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('f1', 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ('total', total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    """Copy every metric from new_eval into main_eval under '<prefix>_<key>'."""
    for k in new_eval:
        main_eval["%s_%s" % (prefix, k)] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """Save a precision-recall step plot to `out_image`.

    NOTE(review): relies on module-global `plt`, which is only imported in the
    __main__ guard when --out-image-dir is given — confirm callers respect that.
    """
    plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
    plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Average precision from sweeping the no-answer threshold; optionally plot the curve."""
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Produce exact/F1/oracle PR curves and merge their AP values into main_eval."""
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, 'pr_exact.png'),
        title='Precision-Recall curve for Exact Match score', )
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, 'pr_f1.png'),
        title='Precision-Recall curve for F1 score', )
    # Oracle: score 1 exactly when the question truly has an answer.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, 'pr_oracle.png'),
        title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)', )
    merge_eval(main_eval, pr_exact, 'pr_exact')
    merge_eval(main_eval, pr_fa, 'pr_f1')
    merge_eval(main_eval, pr_oracle, 'pr_oracle')
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a normalized histogram of no-answer probabilities for `qid_list`."""
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel('Model probability of no-answer')
    plt.ylabel('Proportion of dataset')
    plt.title(f'''Histogram of no-answer probability: {name}''')
    plt.savefig(os.path.join(image_dir, f'''na_prob_hist_{name}.png'''))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Best achievable score (as a percentage) and the no-answer threshold reaching it."""
    # Start from the score obtained by answering nothing at all.
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record best exact/F1 scores and their thresholds in main_eval."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    """Run the full SQuAD 2.0 evaluation using the options parsed into OPTS."""
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json['data']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        # No probabilities supplied: treat every question as confidently answered.
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, 'HasAns')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, 'NoAns')
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')
    if OPTS.out_file:
        with open(OPTS.out_file, 'w') as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        # matplotlib is only needed for the optional plots; import lazily and
        # force the non-interactive Agg backend.
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 328 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
# Module-level logger; the conversion helpers below reference it as `logger`.
logger = logging.get_logger("transformers.models.encodec")
# Name translations from the original Encodec checkpoints to the HF model.
# The original bound every dict to the same throwaway name `a_` even though
# the merge expressions below and recursively_load_weights read the real names.
MAPPING_QUANTIZER = {
    "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
    "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
    "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
    "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
    "encoder.model.0.conv.conv": "encoder.layers.0.conv",
    "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
    "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
    "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
    "encoder.model.3.conv.conv": "encoder.layers.3.conv",
    "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
    "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
    "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
    "encoder.model.6.conv.conv": "encoder.layers.6.conv",
    "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
    "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
    "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
    "encoder.model.9.conv.conv": "encoder.layers.9.conv",
    "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
    "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
    "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
    "encoder.model.12.conv.conv": "encoder.layers.12.conv",
    "encoder.model.13.lstm": "encoder.layers.13.lstm",
    "encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
    "encoder.model.0.conv.norm": "encoder.layers.0.norm",
    "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
    "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
    "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
    "encoder.model.3.conv.norm": "encoder.layers.3.norm",
    "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
    "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
    "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
    "encoder.model.6.conv.norm": "encoder.layers.6.norm",
    "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
    "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
    "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
    "encoder.model.9.conv.norm": "encoder.layers.9.norm",
    "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
    "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
    "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
    "encoder.model.12.conv.norm": "encoder.layers.12.norm",
    "encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
    "decoder.model.0.conv.conv": "decoder.layers.0.conv",
    "decoder.model.1.lstm": "decoder.layers.1.lstm",
    "decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
    "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
    "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
    "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
    "decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
    "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
    "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
    "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
    "decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
    "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
    "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
    "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
    "decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
    "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
    "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
    "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
    "decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
    "decoder.model.0.conv.norm": "decoder.layers.0.norm",
    "decoder.model.3.convtr.norm": "decoder.layers.3.norm",
    "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
    "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
    "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
    "decoder.model.6.convtr.norm": "decoder.layers.6.norm",
    "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
    "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
    "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
    "decoder.model.9.convtr.norm": "decoder.layers.9.norm",
    "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
    "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
    "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
    "decoder.model.12.convtr.norm": "decoder.layers.12.norm",
    "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
    "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
    "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
    "decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
# Checkpoint keys matching any entry here are skipped during loading.
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` to the parameter reached via dotted `key` in the HF model.

    `weight_type` selects the attribute to set ("weight", "weight_g",
    "bias_ih_l0", "running_mean", ...); None means the pointer itself is the
    tensor. Raises ValueError on shape mismatch.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}'''
        )
    # The original enumerated every known weight_type in a long if/elif chain;
    # each branch was just an assignment to `.data` of the named attribute.
    if weight_type is not None:
        getattr(hf_pointer, weight_type).data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''')
def should_ignore(name, ignore_keys):
    """True if a checkpoint key `name` matches any pattern in `ignore_keys`.

    Patterns: trailing ".*" = prefix match; embedded ".*." = prefix+suffix
    containment; otherwise plain substring containment.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    """Copy weights from an original Encodec state dict into `hf_model`.

    Keys are translated through the model-specific MAPPING; entries matching
    IGNORE_KEYS are skipped and unmatched entries are reported as unused.
    """
    unused_weights = []
    # BUG FIX: the original tested `model_name == "encodec_24khz" or "encodec_32khz"`,
    # which is always truthy (a non-empty string literal), so the 48khz branch
    # was unreachable. Compare against each name explicitly.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F'''Unsupported model: {model_name}''')
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(F'''{name} was ignored''')
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                # Pick the specific parameter attribute this entry targets.
                # Order matters: e.g. "weight_g" must win over plain "weight".
                weight_type = None
                for candidate in (
                    "weight_g",
                    "weight_v",
                    "weight_ih_l0",
                    "weight_hh_l0",
                    "bias_ih_l0",
                    "bias_hh_l0",
                    "weight_ih_l1",
                    "weight_hh_l1",
                    "bias_ih_l1",
                    "bias_hh_l1",
                    "bias",
                    "weight",
                    "running_mean",
                    "running_var",
                    "num_batches_tracked",
                ):
                    if candidate in name:
                        weight_type = candidate
                        break
                set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''')
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original Encodec checkpoint to the HF format.

    Saves model + feature extractor under `pytorch_dump_folder_path` and
    optionally pushes both to the hub under `repo_id`.
    """
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F'''Unknown model name: {model_name}''')
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="encodec_24khz",
        type=str,
        help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 675 | 0 |
import math
import sys
import cva
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Elementwise Gaussian density of `img` for the given variance."""
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Square window of side `kernel_size` centred at (x, y)."""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Spatial Gaussian kernel: Gaussian of each cell's distance from the centre."""
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Apply a bilateral filter to a 2-D grayscale image.

    Border pixels (within kernel_size // 2 of an edge) are left at zero.

    Args:
        img: 2-D float image (assumed scaled to [0, 1] by the caller).
        spatial_variance: Variance of the spatial Gaussian weight.
        intensity_variance: Variance of the intensity-difference weight.
        kernel_size: Odd window size.

    Returns:
        Filtered image of the same shape.
    """
    # Fix: every intermediate was bound to a throwaway local and the output
    # pixel assignment `img2[i, j] = val` was destroyed; restored the real
    # names used by the call sites.
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # Intensity differences relative to the centre pixel.
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2
def parse_args(args: list) -> tuple:
    """Parse positional CLI arguments for the bilateral filter demo.

    Expected layout: [prog, filename, spatial_variance, intensity_variance,
    kernel_size]; every argument is optional and falls back to a default.

    Returns:
        (filename, spatial_variance, intensity_variance, kernel_size) with
        kernel_size forced to be odd.
    """
    # Fix: was named `lowercase`; the __main__ section calls `parse_args`.
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        # Bump even sizes up by one so the kernel has a centre pixel.
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    # Demo: read a grayscale image, bilateral-filter it, and display both.
    # Fix: every statement bound its result to a throwaway `a__` while the
    # following lines read `filename` / `img` / `out`; `np.uinta` does not
    # exist and must be `np.uint8`.
    # NOTE(review): `cva` is presumably OpenCV (`cv2`) — the import at the top
    # of this section spells it `cva`; confirm the module name.
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)
    cva.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cva.imshow("output image", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 477 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Dataset reader that builds a 🤗 dataset from a Spark DataFrame.

    Fixes: the original __init__ repeated one placeholder parameter name
    (a SyntaxError), bound the cached options to locals instead of the
    ``self._load_from_cache_file`` / ``self._file_format`` attributes that
    ``read`` consumes, and inherited from an undefined name instead of the
    imported ``AbstractDatasetReader``.
    """

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Return the dataset for ``self.split``, preparing it first unless streaming."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Force a re-download/re-prepare when the cache must be ignored.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
| 675 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
# Lazy-import scaffolding for the HerBERT tokenizers.
# Fix: the import structure was bound to a throwaway `a__` while `_LazyModule`
# below reads `_import_structure`; the fast-tokenizer entry was never added to
# the structure; and the lazy module instance must replace this module in
# `sys.modules` (standard Hugging Face __init__ pattern — confirmed by the
# surrounding boilerplate).
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368 |
'''simple docstring'''
def solution() -> int:
    """Project Euler 19: count the Sundays that fell on the first of a month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000).

    1 Jan 1901 was a Tuesday, so the first Sunday of 1901 is day 6 of
    January — the starting point for stepping a week at a time.
    """
    # Fix: was named `__snake_case`, but the __main__ guard calls `solution()`
    # (and the leading underscores also hid it from `import *`).
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # Leap year: February has 29 days.
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
| 675 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast (CPU-sized) tests for ``StableUnCLIPPipeline``.

    Fixes: all five methods shared one placeholder name (so only the last
    definition survived and pytest could not discover the tests); the class
    attributes required by the tester mixins were all bound to a single
    placeholder; ``embedder_hidden_size`` / ``embedder_projection_dim`` were
    read but never bound; and several argument values were replaced by an
    undefined placeholder (restored from the upstream diffusers test — values
    marked below should be double-checked against upstream).
    """

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,  # restored from upstream — confirm
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # the image embeddings are concatenated with the noised image
            # embeddings of the same dimension
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,  # restored from upstream — confirm
            use_linear_projection=True,  # restored from upstream — confirm
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,  # restored from upstream — confirm
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        # Exact-comparison is only reliable on CPU.
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    """Slow integration tests for ``StableUnCLIPPipeline`` (GPU, real weights).

    Fixes: all three methods shared one placeholder name (only the last
    survived; ``super().tearDown()`` grounds the first one's real name);
    intermediates were bound to throwaway locals; ``torch.floataa`` does not
    exist — the fixture URL ("..._fp16.npy") indicates ``torch.float16``.
    """

    def tearDown(self):
        # Clean up GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 551 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Fix: both module constants were bound to the same throwaway name `a_`
# (the second shadowing the first), while the config class below calls
# `logger.info`. The archive-map name follows the usual transformers
# convention — confirm against upstream.
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    """Configuration class holding the hyper-parameters of a Deformable DETR model.

    Fixes: the original ``__init__`` repeated one placeholder parameter name
    ~40 times (a SyntaxError); ``model_type`` / ``attribute_map`` were both
    bound to a single placeholder; the three accessor methods shared one name
    (only the last survived); and the base class was an undefined name instead
    of the imported ``PretrainedConfig``. Parameter names/defaults restored
    from the upstream transformers implementation.
    """

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a plain-dict backbone config into its config class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this config (and any nested backbone config) to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 675 | 0 |
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    """Value-guided trajectory sampling for offline RL (diffuser-style planning).

    Fixes: all helper methods were defined under one mangled placeholder name
    while the internal call sites read ``normalize`` / ``de_normalize`` /
    ``to_torch`` / ``reset_xa`` / ``run_diffusion``; the ``self.*`` attribute
    assignments and several in-place tensor updates were collapsed into
    throwaway locals; the base class was an undefined name instead of the
    imported ``DiffusionPipeline``. Restored targets follow the upstream
    diffusers value-guided sampling pipeline.
    """

    def __init__(self, value_function, unet, scheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        # Per-key dataset statistics used for (de)normalization; non-numeric
        # entries are deliberately skipped (best effort).
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        """Recursively move/convert ``x_in`` to a tensor on the UNet's device."""
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_xa(self, x_in, cond, act_dim):
        # Overwrite the observation dims (after the action dims) at each
        # conditioned timestep — assumes layout [batch, horizon, act+obs];
        # target slice restored from upstream, confirm.
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                # No guidance on the last couple of steps.
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_xa(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 501 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """Minimal Lightning wrapper matching the training checkpoint's layout:
    a Longformer backbone plus a 2-label question-answering head.

    Fixes: the conversion function below instantiates ``LightningModel`` and
    reads ``lightning_model.model`` / ``lightning_model.qa_outputs``, but the
    original class was named with a placeholder and bound those values to
    locals instead of instance attributes.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because LightningModule expects a forward; never called
    # during conversion (name restored per the Lightning contract — confirm).
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    """Port a PyTorch-Lightning Longformer-QA checkpoint to a 🤗
    ``LongformerForQuestionAnswering`` model and save it.

    Fixes: the original was named ``__snake_case`` while the __main__ guard
    calls ``convert_longformer_qa_checkpoint_to_pytorch``; all intermediates
    were bound to one throwaway name while later lines read
    ``lightning_model`` / ``ckpt``, raising NameError.
    """
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    # CLI entry point for the Longformer-QA checkpoint conversion.
    # Fix: the parser and parsed args were bound to a throwaway `a_` while
    # the following lines read `parser` / `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 675 | 0 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
# Fix: all nine module constants below were bound to the same throwaway name
# `__UpperCAmelCase`, so `logger` and every MAPPING_* name read later in this
# script never existed. Names restored from the references in the conversion
# helpers (MAPPING_24K / MAPPING_48K are read directly below; the two empty
# lists follow the upstream script's TOP_LEVEL_KEYS / IGNORE_KEYS — confirm).
logger = logging.get_logger("transformers.models.encodec")

# Original (Meta) EnCodec parameter name -> HF parameter name.
MAPPING_QUANTIZER = {
    "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
    "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
    "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
    "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
    "encoder.model.0.conv.conv": "encoder.layers.0.conv",
    "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
    "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
    "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
    "encoder.model.3.conv.conv": "encoder.layers.3.conv",
    "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
    "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
    "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
    "encoder.model.6.conv.conv": "encoder.layers.6.conv",
    "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
    "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
    "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
    "encoder.model.9.conv.conv": "encoder.layers.9.conv",
    "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
    "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
    "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
    "encoder.model.12.conv.conv": "encoder.layers.12.conv",
    "encoder.model.13.lstm": "encoder.layers.13.lstm",
    "encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
    "encoder.model.0.conv.norm": "encoder.layers.0.norm",
    "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
    "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
    "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
    "encoder.model.3.conv.norm": "encoder.layers.3.norm",
    "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
    "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
    "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
    "encoder.model.6.conv.norm": "encoder.layers.6.norm",
    "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
    "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
    "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
    "encoder.model.9.conv.norm": "encoder.layers.9.norm",
    "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
    "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
    "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
    "encoder.model.12.conv.norm": "encoder.layers.12.norm",
    "encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
    "decoder.model.0.conv.conv": "decoder.layers.0.conv",
    "decoder.model.1.lstm": "decoder.layers.1.lstm",
    "decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
    "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
    "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
    "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
    "decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
    "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
    "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
    "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
    "decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
    "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
    "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
    "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
    "decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
    "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
    "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
    "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
    "decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
    "decoder.model.0.conv.norm": "decoder.layers.0.norm",
    "decoder.model.3.convtr.norm": "decoder.layers.3.norm",
    "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
    "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
    "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
    "decoder.model.6.convtr.norm": "decoder.layers.6.norm",
    "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
    "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
    "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
    "decoder.model.9.convtr.norm": "decoder.layers.9.norm",
    "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
    "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
    "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
    "decoder.model.12.convtr.norm": "decoder.layers.12.norm",
    "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
    "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
    "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
    "decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the sub-module of ``hf_pointer`` addressed by dotted ``key``.

    ``weight_type`` selects which parameter/buffer of the target module is
    written ("weight", "weight_g", LSTM per-layer tensors, ...; ``None``
    writes the pointer's own data). ``full_name`` is only used in messages.

    Fixes: the original bound every tensor write to a throwaway local instead
    of the module attribute's ``.data``, making the function a no-op, and its
    parameters had placeholder names while the body read the real ones. The
    function name follows the upstream conversion script — confirm callers.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    """Return True if ``name`` matches any pattern in ``ignore_keys``.

    Patterns: ``prefix.*`` matches by prefix; ``prefix.*.suffix`` matches
    when both pieces occur in the name; anything else is a plain substring.

    Fixes: the original shared the placeholder name ``snake_case_`` with its
    sibling functions (the call below uses ``should_ignore``), and its tuple
    unpack bound both pieces to one throwaway name while the condition read
    ``prefix`` / ``suffix``.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    """Copy every tensor of the original EnCodec state dict onto `hf_model`.

    Args:
        orig_dict: original checkpoint state dict (name -> tensor).
        hf_model: target HF `EncodecModel` whose parameters get overwritten.
        model_name: one of "encodec_24khz", "encodec_32khz", "encodec_48khz";
            selects the key-mapping table.

    Raises:
        ValueError: for an unsupported `model_name`.
    """
    unused_weights = []

    # The 24 kHz and 32 kHz checkpoints share one layout; 48 kHz differs.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'''Unsupported model: {model_name}''')

    for name, value in orig_dict.items():
        # NOTE(review): assumes a module-level IGNORE_KEYS list is defined
        # earlier in this file (the conversion scripts conventionally have one)
        # — confirm.
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'''{name} was ignored''')
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    # Match on the suffix only from here on.
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    # Recover the layer index from the original key path.
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)

                # Longer, more specific suffixes must be tested before the
                # generic "bias"/"weight" fallbacks.
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'''Unused weights: {unused_weights}''')
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original EnCodec checkpoint into HF format and save it.

    Args:
        model_name: "encodec_24khz", "encodec_32khz" or "encodec_48khz".
        checkpoint_path: path to the original torch checkpoint.
        pytorch_dump_folder_path: output directory for model + feature extractor.
        config_path: optional path to an existing hf config.json.
        repo_id: optional hub repo to push the converted artifacts to.

    Raises:
        ValueError: for an unknown `model_name`.
    """
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    # NOTE(review): the attribute targets below were reconstructed from the
    # value lists in the garbled original; they follow EncodecConfig's field
    # names — confirm against the installed transformers version.
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'''Unknown model name: {model_name}''')

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="encodec_24khz",
        type=str,
        help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 651 |
"""Lazy import structure for the CTRL model package."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Public objects exposed by this package, keyed by submodule; heavy backends
# are appended below only when their dependency is installed.
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 675 | 0 |
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    """Configuration for the multilingual M-CLIP text encoder."""

    # Model-type identifier used by the transformers auto classes.
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1_024, imageDimSize=768, **kwargs):
        # Attribute names must match what the model reads:
        # `config.transformerDimensions` / `config.numDims`.
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class MultilingualCLIP(PreTrainedModel):
    """XLM-R text encoder followed by a linear projection into the CLIP embedding space."""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        """Return (projected mean-pooled embedding, per-token embeddings)."""
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mask-weighted mean pooling over the sequence dimension.
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
| 143 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class snake_case ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for the GPT-SW3 (GPTSwaTokenizer) SentencePiece tokenizer."""

    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        expected_encoding = {"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences, )
| 675 | 0 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")


@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for the GPT-SW3 (GPTSwaTokenizer) SentencePiece tokenizer."""

    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token="""<unk>""", bos_token="""<unk>""", pad_token="""<unk>""")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = """This is a test"""
        output_text = """This is a test"""
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], """<unk>""")
        self.assertEqual(vocab_keys[1], """<s>""")
        self.assertEqual(vocab_keys[-1], """j""")
        self.assertEqual(len(vocab_keys), 20_00)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 20_00)

    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [4_65, 2_87, 2_65, 6_31, 8_42])

        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        # fmt: off
        self.assertListEqual(
            tokens, ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""],)
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],)

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ["""This is a test""", """I was born in 92000, and this is falsé."""]
        expected_ids_list = [
            [4_65, 2_87, 2_65, 6_31, 8_42],
            [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            """<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
            """Hey there, how are you doing this fine day?""",
            """This is a text with a trailing spaces followed by a dot .""",
            """Häj sväjs lillebrör! =)""",
            """Det är inget fel på Mr. Cool""",
        ]
        # fmt: off
        expected_encoding = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="""AI-Sweden/gpt-sw3-126m""", sequences=sequences,)
| 689 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class snake_case ( ProcessorMixin ):
    """Bundles an OwlViT image processor and a CLIP tokenizer into one processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize `text`/`query_images` and preprocess `images`; at least one must be given.

        Returns a BatchEncoding whose keys depend on which inputs were provided.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            # Single string, or a flat list of strings -> one tokenizer call.
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            # Concatenate the per-sample encodings into one batch tensor.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward to the image processor's `post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 675 | 0 |
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging

logger = logging.get_logger(__name__)

# Map of pretrained checkpoint names to their hosted config files.
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    """Configuration class storing the architecture hyper-parameters of a UMT5 model."""

    model_type = '''umt5'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__(
        self,
        vocab_size=2_5_0_1_1_2,
        d_model=5_1_2,
        d_kv=6_4,
        d_ff=1_0_2_4,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=3_2,
        relative_attention_max_distance=1_2_8,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # `feed_forward_proj` is either "<act>" or "gated-<act>".
        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )

        # Historical alias: "gated-gelu" maps to the gelu_new activation.
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for UMT5 (seq2seq, with optional past key values)."""

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            # With cached keys/values the decoder only receives one new token.
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 1_3

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
| 47 |
"""Tests for the `check_dummies` repo utility."""
import os
import sys
import unittest

git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, """src""", """transformers""")

# Templates mirroring the output of utils/check_dummies.py.
DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class snake_case ( unittest.TestCase ):
    """Unit tests for the helper functions in utils/check_dummies.py."""

    def test_find_backend(self):
        no_backend = find_backend("    _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")")
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):")
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):")
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 675 | 0 |
from __future__ import annotations
_UpperCamelCase = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
_UpperCamelCase = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def _lowercase ( lowercase__ ):
__lowerCAmelCase : Optional[int] = []
__lowerCAmelCase : Any = len(UpperCAmelCase_ )
for i in range(UpperCAmelCase_ ):
__lowerCAmelCase : str = -1
for j in range(i + 1 , UpperCAmelCase_ ):
if arr[i] < arr[j]:
__lowerCAmelCase : List[Any] = arr[j]
break
result.append(UpperCAmelCase_ )
return result
def _lowercase ( lowercase__ ):
__lowerCAmelCase : Tuple = []
for i, outer in enumerate(UpperCAmelCase_ ):
__lowerCAmelCase : Union[str, Any] = -1
for inner in arr[i + 1 :]:
if outer < inner:
__lowerCAmelCase : Optional[int] = inner
break
result.append(UpperCAmelCase_ )
return result
def _lowercase ( lowercase__ ):
__lowerCAmelCase : Optional[Any] = len(UpperCAmelCase_ )
__lowerCAmelCase : str = []
__lowerCAmelCase : Union[str, Any] = [-1] * arr_size
for index in reversed(range(UpperCAmelCase_ ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
__lowerCAmelCase : Union[str, Any] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))
    # `setup` was previously assigned to a throwaway name, leaving the
    # timeit calls below with an undefined variable.
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
| 492 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class snake_case ( metaclass=lowercase ):
"""simple docstring"""
_lowerCamelCase = ["onnx"]
def __init__( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ["onnx"] )
@classmethod
def snake_case ( cls , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ["onnx"] )
@classmethod
def snake_case ( cls , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ["onnx"] )
| 675 | 0 |
def a ( A__ : int = 4000000 ) -> int:
    """Return the sum of the even Fibonacci numbers not exceeding ``A__``.

    Project Euler problem 2: iterate the Fibonacci sequence, collecting the
    even terms, until a term exceeds the limit.
    """
    even_fibs = []
    fib_prev, fib_curr = 0, 1
    while fib_curr <= A__:
        if fib_curr % 2 == 0:
            even_fibs.append(fib_curr)
        fib_prev, fib_curr = fib_curr, fib_prev + fib_curr
    return sum(even_fibs)


if __name__ == "__main__":
    # The previous print referenced an undefined `solution`; the function in
    # this file is named `a`.
    print(f"{a() = }")
| 291 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class snake_case :
    """Builds a tiny random `LayoutLMConfig` plus matching inputs and runs
    shape checks against each TF LayoutLM head.

    NOTE(review): the previous `__init__` reused one name for every
    parameter (a SyntaxError) and every method shared one name; parameter
    and method names are restored from the attribute assignments and from
    the calls made by the model test-case class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Create a config plus random (input_ids, bbox, masks, labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal (coordinate pairs in increasing order)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        # Exercise the supported call signatures.
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


# Descriptive module-level name; the model test-case class below refers to it.
TFLayoutLMModelTester = snake_case
@require_tf
class snake_case ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Standard model-tester suite for the TF LayoutLM heads.

    NOTE(review): the previous base list referenced an undefined name; the
    mixins are imported at the top of this file. Class attributes and method
    names are restored so `TFModelTesterMixin` and unittest discovery can
    find them (the previous code gave every attribute/method one shared
    name, so only the last of each survived).
    """

    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        # NOTE(review): TFLayoutLMModelTester is expected to be the helper
        # class defined above in this file — confirm the name resolves.
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10" )
    def test_onnx_compliancy(self):
        pass
def __snake_case ( ):
    """Build a fixed batch of 2 LayoutLM sequences (ids, mask, bbox, segment
    ids, token labels) for the integration tests below.

    The previous code assigned all five tensors to one name, so the return
    statement referenced undefined variables.
    """
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] )  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] )  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels


# Descriptive alias; the integration test class below refers to it.
prepare_layoutlm_batch_inputs = __snake_case
@require_tf
class snake_case ( unittest.TestCase ):
    """Integration tests running the pretrained `microsoft/layoutlm-base-uncased`
    checkpoint through each TF head. Methods are renamed ``test_*`` so
    unittest discovery finds them (previously they all shared one name).
    """

    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 675 | 0 |
def euclidean_distance_sqr(point_a, point_b):
    """Return the squared Euclidean distance between two 2-D points.

    The previous signature reused one name for both parameters, so the body
    always computed 0; the callers below refer to this function by this name.
    """
    return (point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2


# Backward-compatible alias for the previous (mangled) public name.
a_ = euclidean_distance_sqr
def column_based_sort(array, column=0):
    """Return the points sorted by the given coordinate column (0 = x, 1 = y).

    The previous signature reused one parameter name and the sort key
    referenced an undefined variable; callers below use this name.
    """
    return sorted(array, key=lambda point: point[column])


# Backward-compatible alias for the previous (mangled) public name.
a_ = column_based_sort
def a_ ( _A , _A , _A=float('inf' ) ) -> Dict:
"""simple docstring"""
for i in range(points_counts - 1 ):
for j in range(i + 1 , UpperCAmelCase_ ):
snake_case__ = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
snake_case__ = current_dis
return min_dis
def a_ ( _A , _A , _A=float('inf' ) ) -> Tuple:
"""simple docstring"""
for i in range(min(6 , points_counts - 1 ) , UpperCAmelCase_ ):
for j in range(max(0 , i - 6 ) , UpperCAmelCase_ ):
snake_case__ = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
snake_case__ = current_dis
return min_dis
def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide-and-conquer closest pair: return the minimal *squared*
    distance among the points. Recurses on halves, then checks the strip
    straddling the dividing line.
    """
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)
    # Points close enough to the dividing line to beat the current best.
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


# Backward-compatible alias for the previous (mangled) public name.
a_ = closest_pair_of_points_sqr
def closest_pair_of_points(points, points_counts):
    """Return the Euclidean distance of the closest pair among `points`."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


# Backward-compatible alias for the previous (mangled) public name.
a_ = closest_pair_of_points

if __name__ == "__main__":
    # The previous code bound the sample list to a throwaway name while the
    # call below read an undefined `points`.
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("""Distance:""", closest_pair_of_points(points, len(points)))
| 328 |
'''Keeps the documentation table of contents sorted; run via `make style`/`make fix-copies`.'''
import argparse
from collections import defaultdict
import yaml
# Path to the documentation table of contents that the checks below read and rewrite.
a_ : int = """docs/source/en/_toctree.yml"""
def __snake_case ( UpperCAmelCase_ : Optional[int] ):
lowerCamelCase_ = defaultdict(UpperCAmelCase_ )
lowerCamelCase_ = []
lowerCamelCase_ = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"local": doc["local"], "title": doc["title"]} )
else:
new_doc_list.append(UpperCAmelCase_ )
lowerCamelCase_ = new_doc_list
lowerCamelCase_ = [key for key, value in counts.items() if value > 1]
lowerCamelCase_ = []
for duplicate_key in duplicates:
lowerCamelCase_ = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} )
if len(UpperCAmelCase_ ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] )
lowerCamelCase_ = sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(UpperCAmelCase_ ) > 1:
raise ValueError("{doc_list} has two 'overview' docs which is not allowed." )
overview_doc.extend(UpperCAmelCase_ )
# Sort
return overview_doc
def __snake_case ( UpperCAmelCase_=False ):
    """Check (and with overwrite=True, fix) the sorting of the Schedulers
    section of the doc toctree. The previous version opened the `overwrite`
    flag as a file path and reused one name for every local.
    """
    overwrite = UpperCAmelCase_
    # NOTE(review): `a_` is the toctree path defined at the top of this
    # file; the __main__ block must not rebind it.
    with open(a_, encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(a_, "w", encoding="utf-8" ) as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )


# Descriptive alias; the __main__ block below refers to it.
check_scheduler_doc = __snake_case
def __snake_case ( UpperCAmelCase_=False ):
    """Check (and with overwrite=True, fix) the sorting of the Pipelines
    section of the doc toctree, including each pipeline's sub-sections.
    """
    overwrite = UpperCAmelCase_
    # NOTE(review): `a_` is the toctree path defined at the top of this
    # file; the __main__ block must not rebind it.
    with open(a_, encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(a_, "w", encoding="utf-8" ) as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )


# Descriptive alias; the __main__ block below refers to it.
check_pipeline_doc = __snake_case
if __name__ == "__main__":
    # `parser` and `args` were previously assigned to a throwaway name that
    # also clobbered the module-level toctree path.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 675 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
# The previous code bound both the logger and the example string to `a__`,
# so the logger was immediately shadowed and lost.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)[\"depth\"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline(\"depth-estimation\")

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to(\"cuda\")

        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        ...     \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to(\"cuda\")

        >>> img = load_image(
        ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
        ...     \"/kandinsky/cat.png\"
        ... ).resize((768, 768))

        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")

        >>> prompt = \"A robot, 4k photo\"
        >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"

        >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)

        >>> image_emb, zero_image_emb = pipe_prior(
        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
        ... ).to_tuple()

        >>> images = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save(\"robot_cat.png\")
        ```
"""

# Preserve the legacy module-level name, which previously ended up bound to
# the example string.
a__ = EXAMPLE_DOC_STRING
def lowercase ( height , width , scale_factor=8 ):
    """Round (height, width) up to the nearest multiple of scale_factor**2,
    then divide by scale_factor — yields the latent spatial size.

    The previous signature reused one name for all three parameters, so the
    body referenced undefined variables.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class snake_case ( DiffusionPipeline ):
    """Kandinsky 2.2 controlnet pipeline: denoises latents with `unet` under
    `scheduler` and decodes them with the `movq` VQ model.

    NOTE(review): the previous base class referenced an undefined name;
    `DiffusionPipeline` is imported at the top of this file.
    """

    def __init__( self , unet : UNetaDConditionModel , scheduler : DDPMScheduler , movq : VQModel , ):
        super().__init__()
        # Register sub-models so DiffusionPipeline tracks/saves them.
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        # Spatial scale factor of the MoVQ decoder; read later when sizing latents.
        # (The previous code dropped this value into a throwaway local.)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def UpperCamelCase_ ( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any]) -> List[str]:
"""simple docstring"""
if latents is None:
_snake_case : Union[str, Any] = randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=lowerCAmelCase , dtype=lowerCAmelCase)
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
_snake_case : Optional[int] = latents.to(lowerCAmelCase)
_snake_case : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : str=0) -> Any:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""")
_snake_case : Optional[int] = torch.device(F'''cuda:{gpu_id}''')
_snake_case : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase , lowerCAmelCase)
def UpperCamelCase_ ( self : str , lowerCAmelCase : str=0) -> List[Any]:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0"""):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""")
_snake_case : Optional[Any] = torch.device(F'''cuda:{gpu_id}''')
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=lowerCAmelCase)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_snake_case : Optional[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_snake_case , _snake_case : int = cpu_offload_with_hook(lowerCAmelCase , lowerCAmelCase , prev_module_hook=lowerCAmelCase)
# We'll offload the last model manually.
_snake_case : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase_ ( self : Any) -> Optional[Any]:
"""simple docstring"""
if not hasattr(self.unet , """_hf_hook"""):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase , """_hf_hook""")
and hasattr(module._hf_hook , """execution_device""")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
# NOTE(review): the docstring-constant argument was mangled; `EXAMPLE_DOC_STRING`
# is the conventional module-level name — confirm it exists earlier in this file.
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
    self,
    image_embeds,
    negative_image_embeds,
    hint,
    height: int = 512,
    width: int = 512,
    num_inference_steps: int = 100,
    guidance_scale: float = 4.0,
    num_images_per_prompt: int = 1,
    generator=None,
    latents=None,
    output_type: str = "pil",
    return_dict: bool = True,
):
    """Run the controlnet denoising loop and decode latents into images.

    The original signature declared the same mangled parameter name repeatedly
    (a SyntaxError) while the body referenced the real names; the real names
    are restored here.

    Args:
        image_embeds / negative_image_embeds: CLIP image embeddings (tensor or
            list of tensors) for the positive / unconditional branches.
        hint: controlnet conditioning tensor (e.g. depth map).
        height, width: target image size before MoVQ downscaling.
        num_inference_steps: scheduler steps.
        guidance_scale: classifier-free guidance weight (>1 enables CFG).
        num_images_per_prompt: images generated per embedding.
        generator: optional torch RNG for reproducibility.
        latents: optional pre-made initial latents.
        output_type: one of "pt", "np", "pil".
        return_dict: return `ImagePipelineOutput` instead of a tuple.

    Raises:
        ValueError: for an unsupported `output_type`.
    """
    device = self._execution_device
    do_classifier_free_guidance = guidance_scale > 1.0
    # Lists of embeddings are concatenated into single batch tensors.
    if isinstance(image_embeds, list):
        image_embeds = torch.cat(image_embeds, dim=0)
    if isinstance(negative_image_embeds, list):
        negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
    if isinstance(hint, list):
        hint = torch.cat(hint, dim=0)
    batch_size = image_embeds.shape[0] * num_images_per_prompt
    if do_classifier_free_guidance:
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
        # Stack [uncond, cond] so one UNet pass serves both CFG branches.
        image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
            dtype=self.unet.dtype, device=device
        )
        hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
    self.scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps_tensor = self.scheduler.timesteps
    num_channels_latents = self.movq.config.latent_channels
    height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
    # create initial latent
    latents = self.prepare_latents(
        (batch_size, num_channels_latents, height, width),
        image_embeds.dtype,
        device,
        generator,
        latents,
        self.scheduler,
    )
    for i, t in enumerate(self.progress_bar(timesteps_tensor)):
        # expand the latents if we are doing classifier free guidance
        latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
        added_cond_kwargs = {"""image_embeds""": image_embeds, """hint""": hint}
        noise_pred = self.unet(
            sample=latent_model_input,
            timestep=t,
            encoder_hidden_states=None,
            added_cond_kwargs=added_cond_kwargs,
            return_dict=False,
        )[0]
        if do_classifier_free_guidance:
            # UNet predicts noise and (learned) variance concatenated on channels.
            noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            _, variance_pred_text = variance_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
        if not (
            hasattr(self.scheduler.config, """variance_type""")
            and self.scheduler.config.variance_type in ["learned", "learned_range"]
        ):
            # Scheduler cannot use the variance head — keep only the noise channels.
            noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
        # compute the previous noisy sample x_t -> x_t-1
        latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
    # post-processing
    image = self.movq.decode(latents, force_not_quantize=True)["""sample"""]
    if output_type not in ["pt", "np", "pil"]:
        raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")
    if output_type in ["np", "pil"]:
        image = image * 0.5 + 0.5  # [-1, 1] -> [0, 1]
        image = image.clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
    if output_type == "pil":
        image = self.numpy_to_pil(image)
    if not return_dict:
        return (image,)
    return ImagePipelineOutput(images=image)
| 477 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Precompute per-example token lengths for the train/val datasets and pickle them.

    The function had been renamed to `__snake_case` while the `__main__` guard
    still called `save_len_file` (and the signature repeated one mangled
    parameter name, a SyntaxError); the original names are restored.

    Args:
        tokenizer_name: model id/path for `AutoTokenizer.from_pretrained`.
        data_dir: dataset directory handed to `SeqaSeqDataset`.
        max_source_length / max_target_length: truncation lengths for the datasets.
        consider_target: if True, store max(source_len, target_len) per example,
            otherwise just the source length.
        **kwargs: forwarded to `SeqaSeqDataset`.
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        """Return the list of (max) token lengths for every example of `ds`."""
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            # Count non-pad tokens per row.
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
| 675 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Fast tests for `IFPipeline`, driven by the shared pipeline tester mixins.

    NOTE(review): obfuscation collapsed the base-class names to `lowerCamelCase__`
    (upstream: the IF tester mixin and `PipelineTesterMixin`), every class
    attribute to `lowercase` (later assignments overwrite earlier ones), and
    every method name to `lowercase__` (only the last survives) — compare with
    the upstream test file before relying on this class.
    """

    # Pipeline class / parameter sets consumed by the tester-mixin harness.
    lowercase : Any =IFPipeline
    lowercase : Any =TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
    lowercase : Union[str, Any] =TEXT_TO_IMAGE_BATCH_PARAMS
    lowercase : List[str] =PipelineTesterMixin.required_optional_params - {'latents'}

    def lowercase__ ( self ):
        """Delegate dummy-component construction to the IF tester mixin."""
        return self._get_dummy_components()

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
        """Build deterministic pipeline-call kwargs (args: device, seed).

        MPS cannot host a `torch.Generator`, so seeding falls back to the
        global CPU RNG there.
        NOTE(review): the locals were mangled to `lowerCamelCase_`; the
        `generator`/`inputs` reads below refer to their pre-obfuscation names.
        """
        if str(lowerCAmelCase ).startswith('''mps''' ):
            lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
        else:
            lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
        lowerCamelCase_ ={
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def lowercase__ ( self ):
        """Optional components must survive a save/load round-trip."""
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''' )
    def lowercase__ ( self ):
        """float16 save/load parity (CUDA only)."""
        super().test_save_load_floataa(expected_max_diff=1e-1 )

    def lowercase__ ( self ):
        """Attention slicing must match the unsliced forward pass."""
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def lowercase__ ( self ):
        """Local (non-hub) save/load round-trip."""
        self._test_save_load_local()

    def lowercase__ ( self ):
        """Batched inference must match single-sample inference."""
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
    def lowercase__ ( self ):
        """xFormers attention must match the default attention path."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    """End-to-end GPU tests for the DeepFloyd-IF pipeline family (stage I + II).

    NOTE(review): obfuscation collapsed all method names to `lowercase__`
    (later definitions overwrite earlier ones) and most locals/arguments to
    `lowerCamelCase_`/`lowerCAmelCase`, so the `self._test_if*` calls below
    resolve to names this class never defines as written — compare with the
    upstream test file.
    """

    def lowercase__ ( self ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase__ ( self ):
        """Run text-to-image, img2img and inpainting (each stage I then stage II)
        sharing a single set of model components to bound peak memory."""
        lowerCamelCase_ =IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''', variant='''fp16''', torch_dtype=torch.floataa )
        lowerCamelCase_ =IFSuperResolutionPipeline.from_pretrained(
            '''DeepFloyd/IF-II-L-v1.0''', variant='''fp16''', torch_dtype=torch.floataa, text_encoder=lowerCAmelCase, tokenizer=lowerCAmelCase )
        # pre compute text embeddings and remove T5 to save memory
        pipe_a.text_encoder.to('''cuda''' )
        lowerCamelCase_, lowerCamelCase_ =pipe_a.encode_prompt('''anime turtle''', device='''cuda''' )
        del pipe_a.tokenizer
        del pipe_a.text_encoder
        gc.collect()
        lowerCamelCase_ =None
        lowerCamelCase_ =None
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()
        # img2img
        lowerCamelCase_ =IFImgaImgPipeline(**pipe_a.components )
        lowerCamelCase_ =IFImgaImgSuperResolutionPipeline(**pipe_a.components )
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_imgaimg(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()
        # inpainting
        lowerCamelCase_ =IFInpaintingPipeline(**pipe_a.components )
        lowerCamelCase_ =IFInpaintingSuperResolutionPipeline(**pipe_a.components )
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_inpainting(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """Stage-I text-to-image then stage-II super-resolution, asserting output
        shape, peak GPU memory, and pixel closeness to reference images."""
        _start_torch_memory_measurement()
        lowerCamelCase_ =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase_ =pipe_a(
            prompt_embeds=lowerCAmelCase, negative_prompt_embeds=lowerCAmelCase, num_inference_steps=2, generator=lowerCAmelCase, output_type='''np''', )
        lowerCamelCase_ =output.images[0]
        assert image.shape == (64, 64, 3)
        lowerCamelCase_ =torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        lowerCamelCase_ =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
        assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
        # pipeline 2
        _start_torch_memory_measurement()
        lowerCamelCase_ =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(lowerCAmelCase )
        lowerCamelCase_ =pipe_a(
            prompt_embeds=lowerCAmelCase, negative_prompt_embeds=lowerCAmelCase, image=lowerCAmelCase, generator=lowerCAmelCase, num_inference_steps=2, output_type='''np''', )
        lowerCamelCase_ =output.images[0]
        assert image.shape == (256, 256, 3)
        lowerCamelCase_ =torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        lowerCamelCase_ =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
        assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """Stage-I img2img then stage-II super-resolution, same assertions."""
        _start_torch_memory_measurement()
        lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(lowerCAmelCase )
        lowerCamelCase_ =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase_ =pipe_a(
            prompt_embeds=lowerCAmelCase, negative_prompt_embeds=lowerCAmelCase, image=lowerCAmelCase, num_inference_steps=2, generator=lowerCAmelCase, output_type='''np''', )
        lowerCamelCase_ =output.images[0]
        assert image.shape == (64, 64, 3)
        lowerCamelCase_ =torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        lowerCamelCase_ =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
        assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
        # pipeline 2
        _start_torch_memory_measurement()
        lowerCamelCase_ =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase_ =floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(lowerCAmelCase )
        lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(lowerCAmelCase )
        lowerCamelCase_ =pipe_a(
            prompt_embeds=lowerCAmelCase, negative_prompt_embeds=lowerCAmelCase, image=lowerCAmelCase, original_image=lowerCAmelCase, generator=lowerCAmelCase, num_inference_steps=2, output_type='''np''', )
        lowerCamelCase_ =output.images[0]
        assert image.shape == (256, 256, 3)
        lowerCamelCase_ =torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        lowerCamelCase_ =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
        assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """Stage-I inpainting then stage-II super-resolution, same assertions."""
        _start_torch_memory_measurement()
        lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(lowerCAmelCase )
        lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(1 ) ).to(lowerCAmelCase )
        lowerCamelCase_ =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase_ =pipe_a(
            prompt_embeds=lowerCAmelCase, negative_prompt_embeds=lowerCAmelCase, image=lowerCAmelCase, mask_image=lowerCAmelCase, num_inference_steps=2, generator=lowerCAmelCase, output_type='''np''', )
        lowerCamelCase_ =output.images[0]
        assert image.shape == (64, 64, 3)
        lowerCamelCase_ =torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        lowerCamelCase_ =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
        assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
        # pipeline 2
        _start_torch_memory_measurement()
        lowerCamelCase_ =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(lowerCAmelCase )
        lowerCamelCase_ =floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(lowerCAmelCase )
        lowerCamelCase_ =floats_tensor((1, 3, 256, 256), rng=random.Random(1 ) ).to(lowerCAmelCase )
        lowerCamelCase_ =pipe_a(
            prompt_embeds=lowerCAmelCase, negative_prompt_embeds=lowerCAmelCase, image=lowerCAmelCase, mask_image=lowerCAmelCase, original_image=lowerCAmelCase, generator=lowerCAmelCase, num_inference_steps=2, output_type='''np''', )
        lowerCamelCase_ =output.images[0]
        assert image.shape == (256, 256, 3)
        lowerCamelCase_ =torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        lowerCamelCase_ =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
        assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
def a_ ( ) -> int:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 676 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate sum(poly[i] * x**i) by direct powering.

    `poly` lists coefficients lowest-order first. The function had been renamed
    to `a_` (colliding with `horner` below and breaking the `__main__` guard,
    which calls `evaluate_poly`); the original name is restored.
    """
    return sum(coeff * (x**power) for power, coeff in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial with Horner's method (one multiply+add per term).

    `poly` lists coefficients lowest-order first. The function had been renamed
    to `a_` (shadowing the direct evaluator above and breaking the `__main__`
    guard, which calls `horner`); the original name is restored.
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    # The obfuscated version rebound `a_` (clobbering the functions) and then
    # printed undefined `poly`/`x`; restore the intended local names.
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # coefficients, lowest order first
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 676 | 1 |
'''simple docstring'''
import base64
import baseaa
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class __UpperCamelCase :
def __init__( self, lowerCAmelCase ):
"""simple docstring"""
if isinstance(lowerCAmelCase, lowerCAmelCase ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
lowerCamelCase_ =deepcopy(lowerCAmelCase )
elif os.path.exists(lowerCAmelCase ):
with io.open(lowerCAmelCase, '''r''', encoding='''utf-8''' ) as f:
lowerCamelCase_ =json.load(lowerCAmelCase )
else:
try:
lowerCamelCase_ =baseaa.urlsafe_baadecode(lowerCAmelCase ).decode('''utf-8''' )
lowerCamelCase_ =json.loads(lowerCAmelCase )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
lowerCamelCase_ =config
self.set_stage_and_offload()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_value('''zero_optimization.stage''', -1 )
# offload
lowerCamelCase_ =False
if self.is_zeroa() or self.is_zeroa():
lowerCamelCase_ =set(['''cpu''', '''nvme'''] )
lowerCamelCase_ =set(
[
self.get_value('''zero_optimization.offload_optimizer.device''' ),
self.get_value('''zero_optimization.offload_param.device''' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowerCamelCase_ =True
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.config
# find the config node of interest if it exists
lowerCamelCase_ =ds_key_long.split('''.''' )
lowerCamelCase_ =nodes.pop()
for node in nodes:
lowerCamelCase_ =config.get(lowerCAmelCase )
if config is None:
return None, ds_key
return config, ds_key
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.find_config_node(lowerCAmelCase )
if config is None:
return default
return config.get(lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False ):
"""simple docstring"""
lowerCamelCase_ =self.config
# find the config node of interest if it exists
lowerCamelCase_ =ds_key_long.split('''.''' )
for node in nodes:
lowerCamelCase_ =config
lowerCamelCase_ =config.get(lowerCAmelCase )
if config is None:
if must_exist:
raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_value(lowerCAmelCase )
return False if value is None else bool(lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_value(lowerCAmelCase )
return False if value is None else not bool(lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
return self._stage == 2
def lowercase__ ( self ):
"""simple docstring"""
return self._stage == 3
def lowercase__ ( self ):
"""simple docstring"""
return self._offload
class __UpperCamelCase :
def __init__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =engine
def lowercase__ ( self, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
self.engine.backward(lowerCAmelCase, **lowerCAmelCase )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class __UpperCamelCase(AcceleratedOptimizer):
    """Optimizer wrapper for DeepSpeed runs: the engine drives stepping and
    grad-zeroing, so those hooks become no-ops here.

    The base had been mangled to the undefined `lowerCamelCase__`; it is
    restored to the `AcceleratedOptimizer` imported at the top of the file,
    and the duplicate `lowercase__` method names to the base-class hook names.
    """

    def __init__(self, optimizer):
        # Device placement and scaling are DeepSpeed's job, not Accelerate's.
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def is_overflow(self):
        """Whether the underlying fp16 optimizer overflowed on the last step."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class __UpperCamelCase(AcceleratedScheduler):
    """Scheduler wrapper for DeepSpeed runs: the engine steps the LR scheduler,
    so `step` is a no-op. (Base restored from the undefined `lowerCamelCase__`
    to the imported `AcceleratedScheduler`; the duplicate `__init__` parameter
    names were a SyntaxError.)"""

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class __UpperCamelCase :
def __init__( self, lowerCAmelCase, lowerCAmelCase=0.0_0_1, lowerCAmelCase=0, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =params
lowerCamelCase_ =lr
lowerCamelCase_ =weight_decay
lowerCamelCase_ =kwargs
class __UpperCamelCase :
def __init__( self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=0, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =optimizer
lowerCamelCase_ =total_num_steps
lowerCamelCase_ =warmup_num_steps
lowerCamelCase_ =kwargs
| 676 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCamelCase(ProcessorMixin):
    """Bundles a CLIP image processor with an XLM-Roberta tokenizer behind one
    processor interface.

    The `ProcessorMixin` contract requires the class attributes `attributes`,
    `image_processor_class` and `tokenizer_class` (all had been mangled to a
    single reassigned name `lowercase`), and `__call__` had duplicate mangled
    parameter names — a SyntaxError. Both are restored.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        # `feature_extractor` is the deprecated spelling of `image_processor`.
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; with both, attach
        `pixel_values` to the text encoding."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union (order-preserving, de-duplicated) of tokenizer and image-processor input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 676 | 1 |
'''simple docstring'''
from __future__ import annotations
def a_(nums: list[float]) -> bool:
    """Return True iff the side lengths in `nums` can close into a polygon:
    the longest side must be strictly shorter than the sum of the others.

    The parameter had been mangled to `__snake_case` while the body read
    `nums` (a NameError); the name is restored. The input list is not mutated.

    Raises:
        ValueError: if fewer than two sides are given, or any side is <= 0.
    """
    if len(nums) < 2:
        raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""")
    if any(side <= 0 for side in nums):
        raise ValueError("""All values must be greater than 0""")
    sorted_sides = sorted(nums)  # non-mutating copy+sort
    return sorted_sides[-1] < sum(sorted_sides[:-1])
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 676 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
# Module-level logger for this pipeline file (the binding name was mangled to `a_`).
a_ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class __UpperCamelCase(Pipeline):
    """Image-to-text pipeline: predicts a caption (optionally conditioned on a
    text prompt) for an image.

    The base class and decorator argument had been mangled to the undefined
    `lowerCamelCase__` (restored to the imported `Pipeline` / `PIPELINE_INIT_ARGS`),
    the `Pipeline` hook methods all collapsed to `lowercase__`, and
    `_sanitize_parameters` declared duplicate parameter names — a SyntaxError.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        # Only vision-to-sequence model classes are valid for this task.
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        """Split user kwargs into the (preprocess, forward, postprocess) dicts
        the `Pipeline` machinery expects."""
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        """Caption one image (or a batch); see the class docstring for kwargs."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        """Load the image and build model inputs; prompt handling is model-specific."""
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                # GIT prepends CLS to the prompt ids instead of using special tokens.
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Batched GIT inputs without prompts arrive as a list of Nones; collapse to None.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        """Decode each generated id sequence into `{"generated_text": ...}` records."""
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(output_ids, skip_special_tokens=True),
            }
            records.append(record)
        return records
| 676 | 1 |
'''simple docstring'''
# Parameter-name sets shared by the diffusers pipeline test suites.
# Obfuscation bound every constant to the same name `a_`, so each assignment
# overwrote the previous one; the upstream `pipeline_params.py` names are restored.
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["""prompt""", """negative_prompt"""])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["""image"""])
IMAGE_VARIATION_PARAMS = frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(["""image"""])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["""prompt""", """image""", """negative_prompt"""])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["""image""", """mask_image"""])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["""example_image""", """image""", """mask_image"""])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["""class_labels"""])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["""class_labels"""])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["""batch_size"""])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["""batch_size"""])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["""prompt""", """negative_prompt"""])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["""input_tokens"""])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["""input_tokens"""])
| 676 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch `BertForPreTraining` state dict.

    The function had been renamed to `a_` while the `__main__` guard below still
    calls `convert_tf_checkpoint_to_pytorch`, and the signature repeated one
    mangled parameter name (a SyntaxError); the original names are restored.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: JSON config describing the pretrained architecture.
        pytorch_dump_path: where to save the converted PyTorch weights.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # NOTE(review): obfuscation renamed the `parser` and `args` bindings to `a_`
    # while leaving their uses intact, so `parser` / `args` below are unresolved
    # as written — compare with the upstream conversion script.
    a_ : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--bert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    a_ : Optional[Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 676 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants for the MRPC example below. Both were previously bound
# to the same mangled name, so the first value was silently lost. The names
# follow the upstream accelerate example; EVAL_BATCH_SIZE appears unused in
# this mangled copy — TODO reconcile with upstream, which uses it for the
# evaluation dataloader.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build train/eval dataloaders for GLUE MRPC tokenized with bert-base-cased.

    Args:
        accelerator: The ``Accelerator`` coordinating the (possibly distributed) run.
        batch_size: Per-device batch size used for both dataloaders.

    Returns:
        A ``(train_dataloader, eval_dataloader)`` tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['''idx''', '''sentence1''', '''sentence2'''],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='''longest''',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='''pt''',
        )

    # Instantiate dataloaders. Training is shuffled, evaluation is not.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only: swap the real dataloader builder for a lightweight mock.
# The override was previously assigned to a throwaway name, so the real
# `get_dataloaders` (called below) was never actually replaced.
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train and evaluate bert-base-cased on GLUE MRPC with gradient accumulation.

    Args:
        config: Hyper-parameter dict with keys ``lr``, ``num_epochs``, ``seed``, ``batch_size``.
        args: Parsed CLI namespace (``mixed_precision``, ``gradient_accumulation_steps``, ``cpu``).
    """
    # For testing only: shorten the run under the mocked-dataloaders harness.
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            '''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`'''
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])

    metric = evaluate.load('''glue''', '''mrpc''')

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''', eval_metric)
def main():
    """Parse command-line arguments and launch training with default MRPC hyper-parameters."""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''',
        type=str,
        default=None,
        choices=['''no''', '''fp16''', '''bf16''', '''fp8'''],
        help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''',
    )
    # New Code #
    parser.add_argument(
        '''--gradient_accumulation_steps''',
        type=int,
        default=1,
        help='''The number of minibatches to be ran before gradients are accumulated.''',
    )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    args = parser.parse_args()
    # Default hyper-parameters used by this example.
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 676 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger — the classes below call `logger.warning`/`logger.info`, so it
# must be bound under this name (it was previously assigned to a throwaway).
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config file (transformers convention).
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the AltCLIP text encoder.

    Referenced by name elsewhere in this module (``AltCLIPTextConfig(**text_config)``),
    so the class must carry this name; the ``model_type`` class attribute is read
    via ``cls.model_type`` by the ``from_pretrained`` helpers.
    """

    model_type = 'altclip_text_model'

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Store every hyper-parameter on the instance; the mangled original
        # assigned them to dead locals, leaving the config empty.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the AltCLIP vision encoder (CLIP-style ViT)."""

    model_type = 'altclip_vision_model'

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load a vision config, unwrapping the ``vision_config`` key when given a full AltCLIP config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('''model_type''') == "altclip":
            config_dict = config_dict['''vision_config''']

        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.'''
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    """Composite configuration holding an AltCLIP text config and vision config."""

    model_type = 'altclip'
    # Marks this config as composed of sub-configs (transformers convention).
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # Pop the legacy `*_config_dict` kwargs before calling `super().__init__`
        # so they are not persisted on the config.
        text_config_dict = kwargs.pop('''text_config_dict''', None)
        vision_config_dict = kwargs.pop('''vision_config_dict''', None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict['''id2label'''].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''')

        if vision_config is None:
            vision_config = {}
            logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''')

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Instantiate an AltCLIPConfig from separate text/vision config objects."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 676 | 1 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    """Builds a tiny folding ``EsmConfig`` plus inputs and checks forward-pass output shapes.

    Referenced by name in ``setUp`` below, so the class must carry this name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # The mangled original bound these to dead locals; they must live on
        # `self` for the other methods (which read `self.*`) to work.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels and a config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return a minimal folding-model EsmConfig (2-block trunk, no fp16 ESM)."""
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Run EsmForProteinFolding and assert positions/angles output shapes."""
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common ModelTesterMixin machinery."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model test suite for ESMFold; most shared tests are skipped as unsupported.

    NOTE(review): in the mangled original every method was named `lowercase__`,
    so all but the last shadowed each other and unittest could never discover
    them; distinct `test_*` names are restored here (per-name mapping inferred
    from the skip reasons — confirm against upstream).
    """

    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip('''Does not support attention outputs''')
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip('''Esm does not support embedding resizing''')
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip('''Esm does not support embedding resizing''')
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip('''ESMFold does not support passing input embeds!''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('''ESMFold does not support head pruning.''')
    def test_head_pruning(self):
        pass

    @unittest.skip('''ESMFold does not support head pruning.''')
    def test_head_pruning_integration(self):
        pass

    @unittest.skip('''ESMFold does not support head pruning.''')
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip('''ESMFold does not support head pruning.''')
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip('''ESMFold does not support head pruning.''')
    def test_headmasking(self):
        pass

    @unittest.skip('''ESMFold does not output hidden states in the normal way.''')
    def test_hidden_states_output(self):
        pass

    @unittest.skip('''ESMfold does not output hidden states in the normal way.''')
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip('''ESMFold only has one output format.''')
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip('''ESMFold does not support input chunking.''')
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''')
    def test_initialization(self):
        pass

    @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''')
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''')
    def test_torchscript_output_hidden_states(self):
        pass

    @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''')
    def test_torchscript_simple(self):
        pass

    @unittest.skip('''ESMFold doesn\'t support data parallel.''')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        """End-to-end smoke test: fold a short sequence and compare one atom position."""
        model = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''').float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)['''positions''']
        # Model runs in float32 (`.float()` above); the mangled source's
        # `torch.floataa` dtype is ambiguous — float32 assumed, TODO confirm.
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 676 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=2, lowerCAmelCase=99, lowerCAmelCase=0, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase="last", lowerCAmelCase=None, lowerCAmelCase=None, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_input_lengths
lowerCamelCase_ =use_token_type_ids
lowerCamelCase_ =use_labels
lowerCamelCase_ =gelu_activation
lowerCamelCase_ =sinusoidal_embeddings
lowerCamelCase_ =causal
lowerCamelCase_ =asm
lowerCamelCase_ =n_langs
lowerCamelCase_ =vocab_size
lowerCamelCase_ =n_special
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =num_labels
lowerCamelCase_ =num_choices
lowerCamelCase_ =summary_type
lowerCamelCase_ =use_proj
lowerCamelCase_ =scope
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =None
if self.use_input_lengths:
lowerCamelCase_ =(
ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase_ =None
if self.use_token_type_ids:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs )
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float()
lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase_ =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self ):
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =FlaubertForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_choices
lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =model(
lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    def lowercase__ ( self ):
        """Return ``(config, inputs_dict)`` for the common model tests.

        Unpacks the 9-tuple produced by ``prepare_config_and_inputs`` and repacks
        the model inputs as a kwargs dict (input_ids, token_type_ids, lengths,
        attention_mask).

        NOTE(review): the tuple is unpacked from ``config_and_inputs`` and the
        dict values reference ``input_ids``/``input_lengths``/... but every
        assignment target was mangled to ``lowerCamelCase_`` — the original names
        must be restored before this can run.
        """
        lowerCamelCase_ =self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ),
        ) =config_and_inputs
        lowerCamelCase_ ={
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''lengths''': input_lengths,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Common-suite test class for the FlauBERT PyTorch models.

    NOTE(review): identifiers in this class appear mechanically mangled — the
    5-parameter method below repeats one parameter name (a SyntaxError) and
    several bodies reference names that are never bound. Restore the original
    names before running.
    """

    # All FlauBERT architectures exercised by the common tests
    # (empty tuple when torch is unavailable).
    lowercase : List[Any] =(
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Mapping from pipeline task name to the model class used for pipeline tests.
    lowercase : Tuple =(
        {
            'feature-extraction': FlaubertModel,
            'fill-mask': FlaubertWithLMHeadModel,
            'question-answering': FlaubertForQuestionAnsweringSimple,
            'text-classification': FlaubertForSequenceClassification,
            'token-classification': FlaubertForTokenClassification,
            'zero-shot': FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """Return True when a pipeline test case should be skipped for this model."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('''Fast''' )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ):
        """Prepare inputs via the parent helper; for the QA model, add zeroed
        start/end position label tensors when labels are requested."""
        lowerCamelCase_ =super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                lowerCamelCase_ =torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
                lowerCamelCase_ =torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
        return inputs_dict
    def lowercase__ ( self ):
        """Set up the model tester and the config tester."""
        lowerCamelCase_ =FlaubertModelTester(self )
        lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, emb_dim=37 )
    def lowercase__ ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def lowercase__ ( self ):
        """Check the base FlauBERT model."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Check the LM-head model."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Check the simple QA model."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Check the beam-search QA model."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Check the sequence-classification model."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Check the token-classification model."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Check the multiple-choice model."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase )
    @slow
    def lowercase__ ( self ):
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
    @slow
    @require_torch_gpu
    def lowercase__ ( self ):
        """Trace each model with torch.jit on CPU inputs, round-trip it through
        disk, and run the loaded traced module on the target device."""
        lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            lowerCamelCase_ =True
            lowerCamelCase_ =model_class(config=lowerCAmelCase )
            lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase )
            lowerCamelCase_ =torch.jit.trace(
                lowerCAmelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, '''traced_model.pt''' ) )
                lowerCamelCase_ =torch.jit.load(os.path.join(lowerCAmelCase, '''traced_model.pt''' ), map_location=lowerCAmelCase )
                loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase ), inputs_dict['''attention_mask'''].to(lowerCAmelCase ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    """Integration test running the pretrained FlauBERT base model on a fixed input."""

    @slow
    def lowercase__ ( self ):
        """Forward a fixed token sequence through ``flaubert/flaubert_base_cased``
        and compare the hidden-state shape and a 3x3 corner slice against
        reference values.

        Fixes the mangled original, whose body referenced the undefined name
        ``lowerCAmelCase`` (NameError) and overwrote a single local for every
        intermediate result.
        """
        model = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        with torch.no_grad():
            # Index [0] selects the last hidden state from the model output tuple.
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4 ) )
| 676 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def a_ ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ) -> np.ndarray:
    """Approximate the solution of ``y' = ode_func(x, y)`` with the explicit
    (forward) Euler method.

    Fixes the mangled original, whose five parameters all shared one name (a
    SyntaxError) and whose body referenced unbound identifiers.

    Args:
        ode_func: Right-hand side ``f(x, y)`` of the ODE.
        ya: Initial value ``y(xa)``.
        xa: Initial abscissa.
        step_size: Euler step ``h`` (must be positive).
        x_end: Final abscissa; integration stops once ``x_end`` is reached.

    Returns:
        Array of the ``n + 1`` successive approximations, starting at ``ya``,
        where ``n = ceil((x_end - xa) / step_size)``.

    >>> float(a_(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1])  # doctest: +ELLIPSIS
    2.70481...
    """
    # Number of Euler steps needed to cover [xa, x_end].
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # Forward Euler update: y_{k+1} = y_k + h * f(x_k, y_k)
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
a_ : List[Any] = logging.get_logger(__name__)
def a_ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : int=False ) -> List[str]:
    """Load a PyTorch checkpoint (single file or sharded) and convert it into a
    Flax-compatible state dict.

    NOTE(review): the four parameters share one mangled name (a SyntaxError)
    and the body reads ``pt_path``/``pt_state_dict``/``flax_state_dict``/
    ``is_sharded``, which are never bound here — restore the original
    identifiers before use.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise
    if not is_sharded:
        lowerCamelCase_ =os.path.abspath(__snake_case )
        logger.info(F'''Loading PyTorch weights from {pt_path}''' )
        # map_location='cpu' keeps the load device-independent.
        lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' )
        logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
        lowerCamelCase_ =convert_pytorch_state_dict_to_flax(__snake_case , __snake_case )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        lowerCamelCase_ =convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case )
    return flax_state_dict
def a_ ( __snake_case : Tuple[str] , __snake_case : np.ndarray , __snake_case : Dict[str, jnp.ndarray] , __snake_case : str , ) -> (Tuple[str], np.ndarray):
    """Rename a PyTorch parameter key to its Flax equivalent and reshape the
    tensor when needed.

    Handled cases (each returns as soon as the renamed key exists in the
    random Flax state dict): layer-norm weight -> ``scale``; batch-norm
    running stats -> ``mean``/``var``; embedding weight -> ``embedding``;
    4-D conv weight -> ``kernel`` (transposed HWIO); 2-D linear weight ->
    ``kernel`` (transposed); legacy ``gamma``/``beta`` -> ``weight``/``bias``;
    and torch ``weight_norm`` parametrizations -> ``*_g``/``*_v``.

    NOTE(review): parameter names are mangled (duplicate ``__snake_case`` is a
    SyntaxError) and the body assigns everything to ``lowerCamelCase_`` while
    reading ``pt_tuple_key``/``pt_tensor``/``renamed_pt_tuple_key``/``name`` —
    restore the original identifiers before use.
    """
    def is_key_or_prefix_key_in_dict(__snake_case : Tuple[str] ) -> bool:
        # True when the key exists either bare or under the model prefix.
        return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''scale''',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''mean''',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''var''',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''embedding''',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ):
        # PyTorch OIHW -> Flax HWIO
        lowerCamelCase_ =pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ):
        lowerCamelCase_ =pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    lowerCamelCase_ =None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        lowerCamelCase_ =pt_tuple_key[-2] + '''_g'''
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        lowerCamelCase_ =pt_tuple_key[-2] + '''_v'''
    if name is not None:
        lowerCamelCase_ =pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def a_ ( __snake_case : Union[str, Any] , __snake_case : str ) -> str:
    """Convert an in-memory PyTorch state dict to a nested Flax params dict,
    renaming keys, adding/stripping the base-model prefix as needed, routing
    batch-norm running stats into ``batch_stats``, and dropping
    ``num_batches_tracked`` entries.

    NOTE(review): the body assigns every intermediate to ``lowerCamelCase_``
    while reading real names (``pt_state_dict``, ``flax_model_params``,
    ``random_flax_state_dict``, ``flax_state_dict`` ...) that are never bound —
    restore the original identifiers before use.
    """
    # convert pytorch tensor to numpy
    lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
    lowerCamelCase_ =flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        lowerCamelCase_ =flax_model.params['''params''']
    else:
        lowerCamelCase_ =flax_model.params
    lowerCamelCase_ =flatten_dict(__snake_case )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        lowerCamelCase_ =flatten_dict(flax_model.params['''batch_stats'''] )
        random_flax_state_dict.update(__snake_case )
    lowerCamelCase_ ={}
    # True when loading a with-head checkpoint into a base (headless) model.
    lowerCamelCase_ =(model_prefix not in flax_model_params) and (
        model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
    )
    # True when loading a base checkpoint into a with-head model.
    lowerCamelCase_ =(model_prefix in flax_model_params) and (
        model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        lowerCamelCase_ =tuple(pt_key.split('''.''' ) )
        # remove base model prefix if necessary
        lowerCamelCase_ =pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            lowerCamelCase_ =pt_tuple_key[1:]
        # Correctly rename weight parameters
        lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
            __snake_case , __snake_case , __snake_case , __snake_case )
        # add model prefix if necessary
        lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            lowerCamelCase_ =(model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                lowerCamelCase_ =jnp.asarray(__snake_case )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(__snake_case , __snake_case )
                continue
            # also add unexpected weight so that warning is thrown
            lowerCamelCase_ =jnp.asarray(__snake_case )
        else:
            # also add unexpected weight so that warning is thrown
            lowerCamelCase_ =jnp.asarray(__snake_case )
    return unflatten_dict(__snake_case )
def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[Any]:
    """Sharded-checkpoint variant of the PyTorch -> Flax conversion: loads each
    ``.pt`` shard in turn and merges the renamed/reshaped weights into one
    nested Flax params dict.

    NOTE(review): identifiers are mangled exactly as in the single-file
    variant above (assignments to ``lowerCamelCase_``, reads of unbound real
    names) — restore the original names before use.
    """
    import torch
    # Load the index
    lowerCamelCase_ ={}
    for shard_file in shard_filenames:
        # load using msgpack utils
        lowerCamelCase_ =torch.load(__snake_case )
        lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
        lowerCamelCase_ =flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            lowerCamelCase_ =flax_model.params['''params''']
            lowerCamelCase_ =flatten_dict(__snake_case )
            random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
        else:
            lowerCamelCase_ =flax_model.params
            lowerCamelCase_ =flatten_dict(__snake_case )
        # True when loading a with-head checkpoint into a base (headless) model.
        lowerCamelCase_ =(model_prefix not in flax_model_params) and (
            model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
        )
        # True when loading a base checkpoint into a with-head model.
        lowerCamelCase_ =(model_prefix in flax_model_params) and (
            model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            lowerCamelCase_ =tuple(pt_key.split('''.''' ) )
            # remove base model prefix if necessary
            lowerCamelCase_ =pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                lowerCamelCase_ =pt_tuple_key[1:]
            # Correctly rename weight parameters
            lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
                __snake_case , __snake_case , __snake_case , __snake_case )
            # add model prefix if necessary
            lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                lowerCamelCase_ =(model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                        F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    lowerCamelCase_ =jnp.asarray(__snake_case )
                    continue
                if "var" in flax_key[-1]:
                    lowerCamelCase_ =jnp.asarray(__snake_case )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(__snake_case , __snake_case )
                    continue
                # also add unexpected weight so that warning is thrown
                lowerCamelCase_ =jnp.asarray(__snake_case )
            else:
                # also add unexpected weight so that warning is thrown
                lowerCamelCase_ =jnp.asarray(__snake_case )
    return unflatten_dict(__snake_case )
def a_ ( __snake_case : List[str] , __snake_case : Dict ) -> str:
    """Load a serialized Flax checkpoint from disk and copy its weights into a
    PyTorch model via ``load_flax_weights_in_pytorch_model``.

    NOTE(review): parameters are mangled and the body reads
    ``flax_checkpoint_path``/``model``/``flax_state_dict`` which are never
    bound — restore the original identifiers before use.
    """
    lowerCamelCase_ =os.path.abspath(__snake_case )
    logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )
    # import correct flax class
    lowerCamelCase_ =getattr(__snake_case , '''Flax''' + model.__class__.__name__ )
    # load flax weight dict
    with open(__snake_case , '''rb''' ) as state_f:
        try:
            # msgpack deserialization of the Flax params
            lowerCamelCase_ =from_bytes(__snake_case , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
    return load_flax_weights_in_pytorch_model(__snake_case , __snake_case )
def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]:
    """Copy a nested Flax params dict into a PyTorch model.

    Casts bf16 weights to fp32, renames Flax keys back to PyTorch conventions
    (kernel -> weight with the inverse conv/linear transposes, batch stats ->
    running_mean/running_var, weight_norm ``*_g``/``*_v``), handles
    base-model-prefix mismatches in both directions, then calls
    ``pt_model.load_state_dict`` and logs missing/unexpected keys.

    NOTE(review): as elsewhere in this file, every assignment target is the
    mangled ``lowerCamelCase_`` while the body reads the real names
    (``flax_state``, ``pt_model_dict``, ``flax_key_tuple`` ...) — restore the
    original identifiers before use.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise
    # check if we have bf16 weights
    lowerCamelCase_ =flatten_dict(jax.tree_util.tree_map(lambda __snake_case : x.dtype == jnp.bfloataa , __snake_case ) ).values()
    if any(__snake_case ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''' )
        lowerCamelCase_ =jax.tree_util.tree_map(
            lambda __snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case )
    lowerCamelCase_ =flatten_dict(__snake_case )
    lowerCamelCase_ =pt_model.state_dict()
    # Prefix handling flags, mirroring the PT->Flax direction above.
    lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
    )
    lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    lowerCamelCase_ =[]
    lowerCamelCase_ =set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix
        lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            lowerCamelCase_ =flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            lowerCamelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict:
            # conv layer
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
            lowerCamelCase_ =jnp.transpose(__snake_case , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict:
            # linear layer
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
            lowerCamelCase_ =flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_mean''',)
        elif "var" in flax_key_tuple[-1]:
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_var''',)
        if "batch_stats" in flax_state:
            lowerCamelCase_ ='''.'''.join(flax_key_tuple[1:] )  # Remove the params/batch_stats header
        else:
            lowerCamelCase_ ='''.'''.join(__snake_case )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        lowerCamelCase_ ={}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            lowerCamelCase_ =key.split('''.''' )
            lowerCamelCase_ =None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                lowerCamelCase_ =key_components[-2] + '''_g'''
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                lowerCamelCase_ =key_components[-2] + '''_v'''
            if name is not None:
                lowerCamelCase_ =key_components[:-3] + [name]
                lowerCamelCase_ ='''.'''.join(__snake_case )
                lowerCamelCase_ =key
        if flax_key in special_pt_names:
            lowerCamelCase_ =special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
                    F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
            else:
                # add weight to pytorch dict
                lowerCamelCase_ =np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor
                lowerCamelCase_ =torch.from_numpy(__snake_case )
                # remove from missing keys
                missing_keys.remove(__snake_case )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(__snake_case )
    pt_model.load_state_dict(__snake_case )
    # re-transform missing_keys to list
    lowerCamelCase_ =list(__snake_case )
    if len(__snake_case ) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
            F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).''' )
    else:
        logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
    if len(__snake_case ) > 0:
        logger.warning(
            F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
            F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
            ''' use it for predictions and inference.''' )
    else:
        logger.warning(
            F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
            '''If your task is similar to the task the model of the checkpoint was trained on, '''
            F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
    return pt_model
| 676 | 1 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a_ : Optional[int] = logging.get_logger(__name__)
class __UpperCamelCase ( enum.Enum ):
    # Output-format selector for the generation pipelines below.
    # NOTE(review): both members are named ``lowercase`` — Enum forbids reusing
    # a member name (TypeError at class creation) — and later code refers to
    # ``ReturnType.TENSORS``/``ReturnType.TEXT``; the names look mechanically
    # mangled and must be restored.
    lowercase : int =0
    lowercase : str =1
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
    """Text-to-text generation pipeline (seq2seq models).

    NOTE(review): identifiers are mechanically mangled throughout — the
    parameter lists below repeat one name (a SyntaxError) and several bodies
    read names (``stop_sequence_ids``, ``inputs``, ``result`` ...) that are
    never bound. Restore the original names before running.
    """

    # Key prefix used in the output dicts (e.g. 'generated_text').
    lowercase : Optional[int] ='generated'
    def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Initialize the pipeline and restrict it to seq2seq model classes."""
        super().__init__(*lowerCAmelCase, **lowerCAmelCase )
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase, ):
        """Split pipeline kwargs into preprocess / forward / postprocess dicts."""
        lowerCamelCase_ ={}
        if truncation is not None:
            lowerCamelCase_ =truncation
        lowerCamelCase_ =generate_kwargs
        lowerCamelCase_ ={}
        if return_tensors is not None and return_type is None:
            lowerCamelCase_ =ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            lowerCamelCase_ =return_type
        if clean_up_tokenization_spaces is not None:
            lowerCamelCase_ =clean_up_tokenization_spaces
        if stop_sequence is not None:
            lowerCamelCase_ =self.tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
            if len(lowerCAmelCase ) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''' )
            # Only the first token of a multi-token stop sequence is honored.
            lowerCamelCase_ =stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """Hook for length validation; the base pipeline accepts everything."""
        return True
    def lowercase__ ( self, *lowerCAmelCase, lowerCAmelCase ):
        """Prepend the model's prefix (if any) and tokenize str or list input."""
        lowerCamelCase_ =self.model.config.prefix if self.model.config.prefix is not None else ''''''
        if isinstance(args[0], lowerCAmelCase ):
            if self.tokenizer.pad_token_id is None:
                raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
            lowerCamelCase_ =([prefix + arg for arg in args[0]],)
            lowerCamelCase_ =True
        elif isinstance(args[0], lowerCAmelCase ):
            lowerCamelCase_ =(prefix + args[0],)
            lowerCamelCase_ =False
        else:
            raise ValueError(
                f''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
        lowerCamelCase_ =self.tokenizer(*lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Run the pipeline; flatten singleton per-input result lists."""
        lowerCamelCase_ =super().__call__(*lowerCAmelCase, **lowerCAmelCase )
        if (
            isinstance(args[0], lowerCAmelCase )
            and all(isinstance(lowerCAmelCase, lowerCAmelCase ) for el in args[0] )
            and all(len(lowerCAmelCase ) == 1 for res in result )
        ):
            return [res[0] for res in result]
        return result
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE, **lowerCAmelCase ):
        """Preprocess step: tokenize the raw input."""
        lowerCamelCase_ =self._parse_and_tokenize(lowerCAmelCase, truncation=lowerCAmelCase, **lowerCAmelCase )
        return inputs
    def lowercase__ ( self, lowerCAmelCase, **lowerCAmelCase ):
        """Forward step: validate lengths, generate, and reshape output ids to
        (in_batch, num_return_sequences, seq_len)."""
        if self.framework == "pt":
            lowerCamelCase_, lowerCamelCase_ =model_inputs['''input_ids'''].shape
        elif self.framework == "tf":
            lowerCamelCase_, lowerCamelCase_ =tf.shape(model_inputs['''input_ids'''] ).numpy()
        lowerCamelCase_ =generate_kwargs.get('''min_length''', self.model.config.min_length )
        lowerCamelCase_ =generate_kwargs.get('''max_length''', self.model.config.max_length )
        self.check_inputs(lowerCAmelCase, generate_kwargs['''min_length'''], generate_kwargs['''max_length'''] )
        lowerCamelCase_ =self.model.generate(**lowerCAmelCase, **lowerCAmelCase )
        lowerCamelCase_ =output_ids.shape[0]
        if self.framework == "pt":
            lowerCamelCase_ =output_ids.reshape(lowerCAmelCase, out_b // in_b, *output_ids.shape[1:] )
        elif self.framework == "tf":
            lowerCamelCase_ =tf.reshape(lowerCAmelCase, (in_b, out_b // in_b, *output_ids.shape[1:]) )
        return {"output_ids": output_ids}
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=ReturnType.TEXT, lowerCAmelCase=False ):
        """Postprocess step: decode generated ids to text or pass ids through."""
        lowerCamelCase_ =[]
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                lowerCamelCase_ ={f'''{self.return_name}_token_ids''': output_ids}
            elif return_type == ReturnType.TEXT:
                lowerCamelCase_ ={
                    f'''{self.return_name}_text''': self.tokenizer.decode(
                        lowerCAmelCase, skip_special_tokens=lowerCAmelCase, clean_up_tokenization_spaces=lowerCAmelCase, )
                }
            records.append(lowerCAmelCase )
        return records
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
    """Summarization pipeline: text2text generation with 'summary' output keys
    and length sanity warnings tailored to summarization."""

    # Output-dict key prefix ('summary_text' / 'summary_token_ids').
    lowercase : List[Any] ='summary'
    def __call__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Delegate to the text2text __call__."""
        return super().__call__(*lowerCAmelCase, **lowerCAmelCase )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """Warn when min/max lengths are inconsistent or when max_length exceeds
        the input length (summaries are expected to be shorter than inputs)."""
        if max_length < min_length:
            logger.warning(f'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' )
        if input_length < max_length:
            logger.warning(
                f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
                '''a summarization task, where outputs shorter than the input are typically wanted, you might '''
                f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
    """Translation pipeline: text2text generation with 'translation' output keys
    and src/tgt language handling.

    NOTE(review): identifiers are mangled — e.g. ``_sanitize_parameters``
    assigns to ``lowerCamelCase_`` while later reading
    ``src_lang``/``tgt_lang``/``items`` that are never bound.
    """

    # Output-dict key prefix ('translation_text' / 'translation_token_ids').
    lowercase : Optional[Any] ='translation'
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """Warn when the input is close to max_length (translations are usually
        about as long as their input)."""
        if input_length > 0.9 * max_length:
            logger.warning(
                f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
                '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
        return True
    def lowercase__ ( self, *lowerCAmelCase, lowerCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE, lowerCAmelCase=None, lowerCAmelCase=None ):
        """Use the tokenizer's dedicated translation-input builder when it
        exists; otherwise fall back to the generic tokenization path."""
        if getattr(self.tokenizer, '''_build_translation_inputs''', lowerCAmelCase ):
            return self.tokenizer._build_translation_inputs(
                *lowerCAmelCase, return_tensors=self.framework, truncation=lowerCAmelCase, src_lang=lowerCAmelCase, tgt_lang=lowerCAmelCase )
        else:
            return super()._parse_and_tokenize(*lowerCAmelCase, truncation=lowerCAmelCase )
    def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
        """Extend the parent's parameter split with src_lang/tgt_lang, falling
        back to parsing them from a task name like 'translation_XX_to_YY'."""
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =super()._sanitize_parameters(**lowerCAmelCase )
        if src_lang is not None:
            lowerCamelCase_ =src_lang
        if tgt_lang is not None:
            lowerCamelCase_ =tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            lowerCamelCase_ =kwargs.get('''task''', self.task )
            lowerCamelCase_ =task.split('''_''' )
            if task and len(lowerCAmelCase ) == 4:
                # translation, XX, to YY
                lowerCamelCase_ =items[1]
                lowerCamelCase_ =items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Delegate to the text2text __call__."""
        return super().__call__(*lowerCAmelCase, **lowerCAmelCase )
| 676 |
'''simple docstring'''
def a_ ( first_str : str , second_str : str ) -> str:
    """Interleave the characters of two strings, appending the remainder of the
    longer one.

    Fixes the mangled original, whose two parameters shared one name (a
    SyntaxError), whose body read unbound identifiers, and whose ``__main__``
    guard called an undefined function name.

    >>> a_("AB", "XYZ")
    'AXBYZ'
    """
    first_str_length = len(first_str )
    second_str_length = len(second_str )
    # Iterate up to the length of the longer string so the tail is kept.
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )


if __name__ == "__main__":
    print(a_("""AB""", """XYZ"""), end=""" """)
| 676 | 1 |
'''simple docstring'''
def a_ ( number : int ) -> bool:
    """Return True if ``number`` is an automorphic number, i.e. its square ends
    in the number itself (5 -> 25, 76 -> 5776).

    Fixes the mangled original, whose body read the unbound names ``number``
    and ``number_square`` instead of its (mangled) parameter.

    Raises:
        TypeError: if ``number`` is not an int.
    """
    if not isinstance(number , int ):
        lowerCamelCase_ =f'''Input value of [number={number}] must be an integer'''
        raise TypeError(lowerCamelCase_ )
    if number < 0:
        return False
    lowerCamelCase_ =number * number
    # Compare the trailing digits of the number and its square pairwise.
    while number > 0:
        if number % 10 != lowerCamelCase_ % 10:
            return False
        number //= 10
        lowerCamelCase_ //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the timm_backbone sub-package.
# Fixes the mangled original, which rebound `a_` to a bare list (losing the
# structure dict), referenced the undefined name `_import_structure`, and
# never installed the _LazyModule into sys.modules.
a_ : Any = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: without it the modeling module is simply not exposed.
    pass
else:
    a_["""modeling_timm_backbone"""] = ["""TimmBackbone"""]

if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone
else:
    import sys

    # Replace this module with a lazy proxy that imports symbols on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], a_, module_spec=__spec__)
| 676 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ : Optional[Any] = logging.get_logger(__name__)
class __UpperCamelCase(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a MaskFormer Swin backbone.

    NOTE(review): the original class listed the same (obfuscated) base twice —
    a TypeError at class creation; the bases are restored from this file's
    imports (``BackboneConfigMixin``, ``PretrainedConfig``). Parameter names
    are reconstructed from the attribute assignments in ``__init__``.
    """

    model_type = "maskformer-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Build the config; unknown kwargs go to ``PretrainedConfig``."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 676 |
'''simple docstring'''
import functools
def a_(worda: str, wordb: str) -> int:
    """Return the Levenshtein (edit) distance between *worda* and *wordb*.

    Allowed operations: insert, delete, substitute (cost 1 each).
    Memoized top-down recursion; O(len(worda) * len(wordb)).
    """
    # The original signature declared the same parameter name twice
    # (SyntaxError); two distinct words are clearly intended.
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 | 1 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class __UpperCamelCase:
    """Directed, weighted graph backed by an adjacency dict.

    ``self.graph`` maps each node ``u`` to a list of ``[w, v]`` pairs,
    one per outgoing edge ``u -> v`` with weight ``w``.

    NOTE(review): every method was mangled to the same name with duplicate
    parameter names (SyntaxErrors); names were restored from the internal
    ``self.add_pair`` call site and the bodies' free variables.
    """

    def __init__(self):
        # adjacency dict: node -> list of [weight, neighbour] pairs
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the directed edge ``u -> v`` with weight ``w`` (duplicates ignored)."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        # ensure the target node exists even with no outgoing edges
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        """Return every node currently in the graph."""
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove the stored edge(s) ``u -> v`` regardless of weight."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from ``s`` (default: first node).

        Returns the visit order; stops early once ``d`` is reached.
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with ``c`` nodes and random edges (c < 0: random size)."""
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from ``s`` (default: first node)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Number of edges pointing at ``u``."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Number of edges leaving ``u``."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based topological order starting at ``s`` (default: first node)."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # a node whose children are all visited is emitted
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return the nodes that lie on some cycle (empty list if acyclic)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    # a visited non-parent node reached while descending closes a cycle
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a cycle is detected, else False."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by ``dfs(s, e)``."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by ``bfs(s)``."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class __UpperCamelCase:
    """Undirected, weighted graph backed by an adjacency dict.

    Each edge is stored in both directions as ``[w, neighbour]`` pairs.

    NOTE(review): method and parameter names were mangled in the original
    (duplicate parameter names were SyntaxErrors); restored from the bodies'
    free variables and the symmetric-edge structure.
    """

    def __init__(self):
        # adjacency dict: node -> list of [weight, neighbour] pairs
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the undirected edge ``u -- v`` with weight ``w`` (duplicates ignored)."""
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove the edge ``u -- v`` from both adjacency lists."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from ``s``; stops early at ``d``."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # all children visited: backtrack one stack level
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with ``c`` nodes and random edges (c < 0: random size)."""
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from ``s`` (default: first node)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident to ``u``."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return the nodes that lie on some cycle (empty list if acyclic)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    # visited non-parent node reached while descending closes a cycle
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a cycle is detected, else False."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return every node currently in the graph."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by ``dfs(s, e)``."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by ``bfs(s)``."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 676 |
'''simple docstring'''
def a_(number: int) -> bool:
    """Return True if ``number`` is automorphic.

    An automorphic number is one whose square ends in the number itself
    (e.g. 5 -> 25, 76 -> 5776).

    Raises:
        TypeError: if ``number`` is not an int.
    """
    # Original code compared isinstance(number, number) and referenced an
    # undefined name; the body clearly intends an int type-check.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and its square one by one.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 | 1 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a_(module) -> None:
    """Freeze *module*: disable gradient tracking on all of its parameters.

    NOTE(review): the original loop body assigned a dead local; setting
    ``requires_grad = False`` is the evident intent of iterating parameters.
    """
    for param in module.parameters():
        param.requires_grad = False
def a_() -> str:
    """Pick the best available torch device string: 'mps', 'cuda' or 'cpu'.

    Warns (via print) when MPS is selected, since backprop there was unreliable.
    """
    # Original assigned to a junk name but read `device`; restore the variable.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device
def a_(img) -> None:
    """Display *img* with matplotlib, hiding both axes.

    NOTE(review): the original passed the image itself to ``set_visible`` and
    read an unassigned name; hiding the axes (False) is the evident intent.
    """
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()
def a_() -> str:
    """Return the current local time formatted as ``HH:MM:SS``."""
    current_time = datetime.now()
    return current_time.strftime("%H:%M:%S")
| 676 |
'''simple docstring'''
from __future__ import annotations
# 9x9 sudoku board type.
Matrix = list[list[int]]

# assigning initial values to the grid (0 = empty cell);
# names restored from the __main__ block, which iterates
# over (initial_grid, no_solution).
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def a_(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` may be placed at (row, column).

    Checks the row, the column and the 3x3 box for a conflict.
    (The original declared four identically-named parameters — a SyntaxError;
    names restored from the body's free variables.)
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    # (row - row % 3, column - column % 3) is the top-left cell of the box.
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def a_(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of the first empty cell, or None if full.

    Cells are scanned row-major; 0 marks an empty cell.
    """
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def a_(grid: Matrix) -> Matrix | None:
    """Solve *grid* in place by backtracking; return it, or None if unsolvable.

    NOTE(review): depends on the helpers above being importable as
    ``find_empty_location`` and ``is_safe`` — confirm the module's naming.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if a_(grid) is not None:
                return grid
            # undo the tentative digit before trying the next candidate
            grid[row][column] = 0
    return None
def a_(grid: Matrix) -> None:
    """Print *grid* one row per line, cells separated by single spaces."""
    for row in grid:
        for cell in row:
            # Original printed the whole grid argument; printing the cell
            # is the evident intent of the nested loop.
            print(cell, end=" ")
        print()
if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    # NOTE(review): relies on the module exposing `initial_grid`, `no_solution`,
    # `print_solution` and `sudoku` — the defs above appear renamed; confirm.
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        # Original assigned the result to a throwaway name but then read
        # `solution`; restore the binding.
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 676 | 1 |
'''simple docstring'''
import functools
def a_ ( __snake_case : str , __snake_case : str ) -> int:
"""simple docstring"""
lowerCamelCase_ =len(__snake_case )
lowerCamelCase_ =len(__snake_case )
@functools.cache
def min_distance(__snake_case : int , __snake_case : int ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
lowerCamelCase_ =int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , __snake_case ) , 1 + min_distance(__snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (name obfuscated by a refactor; conventionally ``logger``).
a_ : Union[str, Any] = logging.get_logger(__name__)
# NOTE(review): checkpoint-name -> config-URL map; upstream names this
# INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP — confirm before relying on `a_`.
a_ : Tuple = {
    """huggingface/informer-tourism-monthly""": (
        """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class __UpperCamelCase(PretrainedConfig):
    """Configuration class for the Informer time-series transformer.

    NOTE(review): the original ``__init__`` declared dozens of identically
    named parameters (a SyntaxError); names and defaults are reconstructed
    from the attribute assignments in the body. Base class restored from
    this file's ``PretrainedConfig`` import.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        """Build the config; ``is_encoder_decoder`` and unknown kwargs go to the base."""
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Width of the per-timestep feature vector fed to the transformer."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 676 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a_ : Tuple = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase(BenchmarkArguments):
    """PyTorch-specific benchmark arguments.

    NOTE(review): field names and the ``kwargs`` parameter were mangled in
    the original (``__init__`` read an undefined ``kwargs``); restored from
    the body's ``kwargs.pop(...)`` keys and the ``self.deprecated_args`` use.
    """

    # Deprecated negative flags still accepted for backward compatibility;
    # translated to their positive counterparts in __init__.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated ``no_*`` kwargs, then delegate to the base class."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                # strip the "no_" prefix and invert the boolean
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Resolve the torch device and GPU count once; cached thereafter."""
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 676 |
'''simple docstring'''
from __future__ import annotations
def a_(limit: int) -> list[int]:
    """Return all primes below *limit* using an odd-only sieve of Eratosthenes.

    Requires ``limit >= 3``. Even numbers other than 2 are never consulted,
    so the sieve only marks multiples of odd candidates.
    """
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        # mark every multiple of i starting at 2*i
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    # collect the surviving odd candidates
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def a_(ceiling: int = 1000000) -> int:
    """Project Euler 50: the prime below *ceiling* that is the sum of the
    longest run of consecutive primes.

    NOTE(review): expects the sieve defined above to be importable as
    ``prime_sieve`` — confirm the module's naming.
    """
    primes = prime_sieve(ceiling)
    prime_set = set(primes)  # O(1) membership instead of list scans
    length = 0
    largest = 0
    for i in range(len(primes)):
        # only consider runs at least as long as the current best
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
    # NOTE(review): `solution` must resolve to the Euler-50 solver defined
    # above — the def there appears renamed; confirm before running as a script.
    print(F"""{solution() = }""")
| 676 | 1 |
'''simple docstring'''
import numpy as np
def a_(vector: np.ndarray) -> np.ndarray:
    """Elementwise logistic sigmoid: 1 / (1 + e^-x).

    (Parameter renamed to ``vector`` — the original body read that name
    while the signature declared another.)
    """
    return 1 / (1 + np.exp(-vector))
def a_(vector: np.ndarray) -> np.ndarray:
    """Elementwise SiLU / swish activation: x * sigmoid(x).

    The sigmoid is inlined because the original called an undefined
    name ``sigmoid``; x * 1/(1+e^-x) is algebraically identical.
    """
    return vector * (1 / (1 + np.exp(-vector)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase(DiffusionPipeline):
    """Unconditional DDIM image-generation pipeline (unet + DDIM scheduler).

    NOTE(review): ``__init__`` originally declared two identically named
    parameters (SyntaxError); names restored from ``register_modules`` and
    the ``scheduler.config`` access. Base class restored from this file's
    ``DiffusionPipeline`` import.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator=None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Sample ``batch_size`` images; returns ImagePipelineOutput or a tuple."""
        # sample_size may be a single int (square) or an explicit (h, w) pair
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        # map from [-1, 1] to [0, 1] and move channels last for numpy/PIL
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 676 | 1 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
a_ : str = get_tests_dir("""fixtures""")
a_ : Optional[int] = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
a_ : List[str] = get_tests_dir("""fixtures/dummy-config.json""")
class __UpperCamelCase ( unittest.TestCase ):
    # Test suite for AutoFeatureExtractor loading / registration behavior.
    # NOTE(review): this block appears auto-mangled — the bare name
    # `lowerCAmelCase` passed around below is never defined in this scope, so
    # most of these tests would raise NameError as written. Confirm against
    # the upstream transformers AutoFeatureExtractor test file before use.
    def lowercase__ ( self ):
        """Placeholder setup; only initializes a throwaway counter."""
        lowerCamelCase_ =0
    def lowercase__ ( self ):
        """A feature extractor can be loaded from a hub repo id."""
        lowerCamelCase_ =AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
        self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
    def lowercase__ ( self ):
        """A feature extractor can be loaded from a local path/config."""
        lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
        self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
    def lowercase__ ( self ):
        """config.json alone (without feature_extractor_type) suffices to load."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCamelCase_ =WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(lowerCAmelCase ).to_dict()
            config_dict.pop('''feature_extractor_type''' )
            lowerCamelCase_ =WavaVecaFeatureExtractor(**lowerCAmelCase )
            # save in new folder
            model_config.save_pretrained(lowerCAmelCase )
            config.save_pretrained(lowerCAmelCase )
            lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
            # make sure private variable is not incorrectly saved
            lowerCamelCase_ =json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
    def lowercase__ ( self ):
        """A feature extractor can be loaded from a config object."""
        lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
        self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
    def lowercase__ ( self ):
        """A non-existent repo id raises a helpful error message."""
        with self.assertRaisesRegex(
            lowerCAmelCase, '''bert-base is not a local folder and is not a valid model identifier''' ):
            lowerCamelCase_ =AutoFeatureExtractor.from_pretrained('''bert-base''' )
    def lowercase__ ( self ):
        """An invalid git revision raises a helpful error message."""
        with self.assertRaisesRegex(
            lowerCAmelCase, R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(lowerCAmelCase, revision='''aaaaaa''' )
    def lowercase__ ( self ):
        """A repo without preprocessor_config.json raises a helpful error."""
        with self.assertRaisesRegex(
            lowerCAmelCase, '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''', ):
            lowerCamelCase_ =AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
    def lowercase__ ( self ):
        """Remote-code extractors require trust_remote_code and round-trip via save_pretrained."""
        with self.assertRaises(lowerCAmelCase ):
            lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowerCAmelCase ):
            lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''', trust_remote_code=lowerCAmelCase )
        lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(
            '''hf-internal-testing/test_dynamic_feature_extractor''', trust_remote_code=lowerCAmelCase )
        self.assertEqual(feature_extractor.__class__.__name__, '''NewFeatureExtractor''' )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(lowerCAmelCase )
            lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(lowerCAmelCase, trust_remote_code=lowerCAmelCase )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, '''NewFeatureExtractor''' )
    def lowercase__ ( self ):
        """Custom config/extractor classes can be registered with the auto-API."""
        try:
            AutoConfig.register('''custom''', lowerCAmelCase )
            AutoFeatureExtractor.register(lowerCAmelCase, lowerCAmelCase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowerCAmelCase ):
                AutoFeatureExtractor.register(lowerCAmelCase, lowerCAmelCase )
            # Now that the config is registered, it can be used as any other config with the auto-API
            lowerCamelCase_ =CustomFeatureExtractor.from_pretrained(lowerCAmelCase )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(lowerCAmelCase )
                lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
        finally:
            # Always clean the shared registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def lowercase__ ( self ):
        """A locally registered class wins unless trust_remote_code is enabled."""
        class __UpperCamelCase ( lowerCamelCase__ ):
            # Marker attribute used below to distinguish local vs hub class.
            lowercase : List[str] =True
        try:
            AutoConfig.register('''custom''', lowerCAmelCase )
            AutoFeatureExtractor.register(lowerCAmelCase, lowerCAmelCase )
            # If remote code is not set, the default is to use local
            lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' )
            self.assertEqual(feature_extractor.__class__.__name__, '''NewFeatureExtractor''' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''', trust_remote_code=lowerCAmelCase )
            self.assertEqual(feature_extractor.__class__.__name__, '''NewFeatureExtractor''' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''', trust_remote_code=lowerCAmelCase )
            self.assertEqual(feature_extractor.__class__.__name__, '''NewFeatureExtractor''' )
            self.assertTrue(not hasattr(lowerCAmelCase, '''is_local''' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 676 |
'''simple docstring'''
from maths.prime_check import is_prime
def a_ ( __snake_case : int ) -> int:
    """Return the twin prime of a number.

    Twin primes differ by exactly 2: when both ``__snake_case`` and
    ``__snake_case + 2`` are prime, the function returns ``__snake_case + 2``;
    otherwise it returns ``-1``.

    Raises:
        TypeError: if the input value is not an integer.
    """
    # Bug fixes vs. the original: `isinstance(x, x)` was a meaningless check
    # (it must test against `int`), the error message referenced an undefined
    # `number`, and TypeError was raised with the raw input instead of the
    # message; the prime checks also used the undefined name `number`.
    if not isinstance(__snake_case, int):
        msg = f"Input value of [number={__snake_case}] must be an integer"
        raise TypeError(msg)
    if is_prime(__snake_case) and is_prime(__snake_case + 2):
        return __snake_case + 2
    return -1
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 676 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __UpperCamelCase ( lowerCamelCase__ ):
    # Output container for the SDE-VE predictor step: the sampled previous
    # state and its mean (see the step method below, which returns both).
    # NOTE(review): mangled — the base class name `lowerCamelCase__` is
    # undefined here and both fields share the name `lowercase`, which makes
    # the second assignment shadow the first; restore upstream names.
    lowercase : torch.FloatTensor
    lowercase : torch.FloatTensor
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    # Variance-exploding (VE) SDE scheduler (score-based generative modeling).
    # NOTE(review): this block is auto-mangled — every keyword parameter below
    # is named `lowerCAmelCase` (duplicate argument names are a SyntaxError in
    # Python) and each body references the names those parameters should have
    # carried (sigma_max, sample, timesteps, ...). The comments describe the
    # evident upstream intent; restore real parameter names before use.
    lowercase : Tuple =1  # solver order of the scheduler

    @register_to_config
    def __init__( self, lowerCAmelCase = 2_000, lowerCAmelCase = 0.1_5, lowerCAmelCase = 0.0_1, lowerCAmelCase = 1_3_4_8.0, lowerCAmelCase = 1e-5, lowerCAmelCase = 1, ):
        """Store config (train steps, snr, sigma range, eps) and precompute sigmas."""
        lowerCamelCase_ =sigma_max
        # setable values
        lowerCamelCase_ =None
        self.set_sigmas(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """Identity: the VE-SDE formulation does not rescale model inputs."""
        return sample
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None ):
        """Create the continuous timestep grid on the requested device."""
        lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
        lowerCamelCase_ =torch.linspace(1, lowerCAmelCase, lowerCAmelCase, device=lowerCAmelCase )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None ):
        """Build the geometric sigma schedule (creating timesteps first if unset)."""
        lowerCamelCase_ =sigma_min if sigma_min is not None else self.config.sigma_min
        lowerCamelCase_ =sigma_max if sigma_max is not None else self.config.sigma_max
        lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(lowerCAmelCase, lowerCAmelCase )
        # geometric interpolation between sigma_min and sigma_max
        lowerCamelCase_ =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        lowerCamelCase_ =torch.exp(torch.linspace(math.log(lowerCAmelCase ), math.log(lowerCAmelCase ), lowerCAmelCase ) )
        lowerCamelCase_ =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """Return sigma at the previous discrete timestep (zero at t == 0)."""
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
        """Predictor step: one reverse-SDE update of the sample."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        lowerCamelCase_ =timestep * torch.ones(
            sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
        lowerCamelCase_ =(timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        lowerCamelCase_ =timesteps.to(self.discrete_sigmas.device )
        lowerCamelCase_ =self.discrete_sigmas[timesteps].to(sample.device )
        lowerCamelCase_ =self.get_adjacent_sigma(lowerCAmelCase, lowerCAmelCase ).to(sample.device )
        lowerCamelCase_ =torch.zeros_like(lowerCAmelCase )
        lowerCamelCase_ =(sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        lowerCamelCase_ =diffusion.flatten()
        # broadcast the per-batch diffusion coefficient to the sample's rank
        while len(diffusion.shape ) < len(sample.shape ):
            lowerCamelCase_ =diffusion.unsqueeze(-1 )
        lowerCamelCase_ =drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        lowerCamelCase_ =randn_tensor(
            sample.shape, layout=sample.layout, generator=lowerCAmelCase, device=sample.device, dtype=sample.dtype )
        lowerCamelCase_ =sample - drift # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        lowerCamelCase_ =prev_sample_mean + diffusion * noise # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=lowerCAmelCase, prev_sample_mean=lowerCAmelCase )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
        """Corrector step: Langevin-style correction with an SNR-scaled step size."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        lowerCamelCase_ =randn_tensor(sample.shape, layout=sample.layout, generator=lowerCAmelCase ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        lowerCamelCase_ =torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
        lowerCamelCase_ =torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
        lowerCamelCase_ =(self.config.snr * noise_norm / grad_norm) ** 2 * 2
        lowerCamelCase_ =step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        lowerCamelCase_ =step_size.flatten()
        # broadcast the per-batch step size to the sample's rank
        while len(step_size.shape ) < len(sample.shape ):
            lowerCamelCase_ =step_size.unsqueeze(-1 )
        lowerCamelCase_ =sample + step_size * model_output
        lowerCamelCase_ =prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=lowerCAmelCase )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Forward diffusion: add sigma-scaled noise to clean samples."""
        lowerCamelCase_ =timesteps.to(original_samples.device )
        lowerCamelCase_ =self.discrete_sigmas.to(original_samples.device )[timesteps]
        # assumes 4-D samples (batch, C, H, W) given the [:, None, None, None]
        # broadcasting — TODO confirm against callers
        lowerCamelCase_ =(
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
        )
        lowerCamelCase_ =noise + original_samples
        return noisy_samples
    def __len__( self ):
        """Number of training timesteps from the config."""
        return self.config.num_train_timesteps
| 676 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __UpperCamelCase ( lowerCamelCase__ ):
    # Output container for the SDE-VE predictor step: the sampled previous
    # state and its mean (see the step method below, which returns both).
    # NOTE(review): mangled — the base class name `lowerCamelCase__` is
    # undefined here and both fields share the name `lowercase`, which makes
    # the second assignment shadow the first; restore upstream names.
    lowercase : torch.FloatTensor
    lowercase : torch.FloatTensor
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    # Variance-exploding (VE) SDE scheduler (score-based generative modeling).
    # NOTE(review): this block is a byte-for-byte duplicate of the scheduler
    # defined earlier in this file and is equally auto-mangled — every keyword
    # parameter is named `lowerCAmelCase` (duplicate argument names are a
    # SyntaxError) and each body references the names those parameters should
    # have carried. Restore real parameter names before use.
    lowercase : Tuple =1  # solver order of the scheduler

    @register_to_config
    def __init__( self, lowerCAmelCase = 2_000, lowerCAmelCase = 0.1_5, lowerCAmelCase = 0.0_1, lowerCAmelCase = 1_3_4_8.0, lowerCAmelCase = 1e-5, lowerCAmelCase = 1, ):
        """Store config (train steps, snr, sigma range, eps) and precompute sigmas."""
        lowerCamelCase_ =sigma_max
        # setable values
        lowerCamelCase_ =None
        self.set_sigmas(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """Identity: the VE-SDE formulation does not rescale model inputs."""
        return sample
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None ):
        """Create the continuous timestep grid on the requested device."""
        lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
        lowerCamelCase_ =torch.linspace(1, lowerCAmelCase, lowerCAmelCase, device=lowerCAmelCase )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None ):
        """Build the geometric sigma schedule (creating timesteps first if unset)."""
        lowerCamelCase_ =sigma_min if sigma_min is not None else self.config.sigma_min
        lowerCamelCase_ =sigma_max if sigma_max is not None else self.config.sigma_max
        lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(lowerCAmelCase, lowerCAmelCase )
        # geometric interpolation between sigma_min and sigma_max
        lowerCamelCase_ =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        lowerCamelCase_ =torch.exp(torch.linspace(math.log(lowerCAmelCase ), math.log(lowerCAmelCase ), lowerCAmelCase ) )
        lowerCamelCase_ =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """Return sigma at the previous discrete timestep (zero at t == 0)."""
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
        """Predictor step: one reverse-SDE update of the sample."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        lowerCamelCase_ =timestep * torch.ones(
            sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
        lowerCamelCase_ =(timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        lowerCamelCase_ =timesteps.to(self.discrete_sigmas.device )
        lowerCamelCase_ =self.discrete_sigmas[timesteps].to(sample.device )
        lowerCamelCase_ =self.get_adjacent_sigma(lowerCAmelCase, lowerCAmelCase ).to(sample.device )
        lowerCamelCase_ =torch.zeros_like(lowerCAmelCase )
        lowerCamelCase_ =(sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        lowerCamelCase_ =diffusion.flatten()
        # broadcast the per-batch diffusion coefficient to the sample's rank
        while len(diffusion.shape ) < len(sample.shape ):
            lowerCamelCase_ =diffusion.unsqueeze(-1 )
        lowerCamelCase_ =drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        lowerCamelCase_ =randn_tensor(
            sample.shape, layout=sample.layout, generator=lowerCAmelCase, device=sample.device, dtype=sample.dtype )
        lowerCamelCase_ =sample - drift # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        lowerCamelCase_ =prev_sample_mean + diffusion * noise # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=lowerCAmelCase, prev_sample_mean=lowerCAmelCase )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
        """Corrector step: Langevin-style correction with an SNR-scaled step size."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        lowerCamelCase_ =randn_tensor(sample.shape, layout=sample.layout, generator=lowerCAmelCase ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        lowerCamelCase_ =torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
        lowerCamelCase_ =torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
        lowerCamelCase_ =(self.config.snr * noise_norm / grad_norm) ** 2 * 2
        lowerCamelCase_ =step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        lowerCamelCase_ =step_size.flatten()
        # broadcast the per-batch step size to the sample's rank
        while len(step_size.shape ) < len(sample.shape ):
            lowerCamelCase_ =step_size.unsqueeze(-1 )
        lowerCamelCase_ =sample + step_size * model_output
        lowerCamelCase_ =prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=lowerCAmelCase )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Forward diffusion: add sigma-scaled noise to clean samples."""
        lowerCamelCase_ =timesteps.to(original_samples.device )
        lowerCamelCase_ =self.discrete_sigmas.to(original_samples.device )[timesteps]
        # assumes 4-D samples (batch, C, H, W) given the [:, None, None, None]
        # broadcasting — TODO confirm against callers
        lowerCamelCase_ =(
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
        )
        lowerCamelCase_ =noise + original_samples
        return noisy_samples
    def __len__( self ):
        """Number of training timesteps from the config."""
        return self.config.num_train_timesteps
| 676 | 1 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
# Mapping from original SAM checkpoint key fragments to the corresponding
# Hugging Face SamModel parameter names (substring replacement, applied below).
# NOTE(review): mangled name — the conversion code refers to this table as
# KEYS_TO_MODIFY_MAPPING, but here it is bound to `a_`.
a_ : Optional[int] = {
    """iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
    """iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
    """iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
    """mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
    """mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
    """mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
    """mask_downscaling.0""": """mask_embed.conv1""",
    """mask_downscaling.1""": """mask_embed.layer_norm1""",
    """mask_downscaling.3""": """mask_embed.conv2""",
    """mask_downscaling.4""": """mask_embed.layer_norm2""",
    """mask_downscaling.6""": """mask_embed.conv3""",
    """point_embeddings""": """point_embed""",
    """pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
    """image_encoder""": """vision_encoder""",
    """neck.0""": """neck.conv1""",
    """neck.1""": """neck.layer_norm1""",
    """neck.2""": """neck.conv2""",
    """neck.3""": """neck.layer_norm2""",
    """patch_embed.proj""": """patch_embed.projection""",
    """.norm""": """.layer_norm""",
    """blocks""": """layers""",
}
def a_ ( __snake_case : str ) -> Tuple:
    """Rename original SAM checkpoint keys to HF SamModel naming.

    NOTE(review): auto-mangled — the body references `state_dict`, `key`,
    `layer_nb`, `model_state_dict` and `KEYS_TO_MODIFY_MAPPING`, none of which
    are defined from the single `__snake_case` parameter or this module's
    visible bindings; restore the upstream variable names before use.
    """
    lowerCamelCase_ ={}
    # pixel statistics belong to the image processor, not the model weights
    state_dict.pop('''pixel_mean''' , __snake_case )
    state_dict.pop('''pixel_std''' , __snake_case )
    # matches per-layer output-hypernetwork MLP keys; group(2) is the layer index
    lowerCamelCase_ =r'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                lowerCamelCase_ =key.replace(__snake_case , __snake_case )
        if re.match(__snake_case , __snake_case ):
            lowerCamelCase_ =int(re.match(__snake_case , __snake_case ).group(2 ) )
            # remap the 3-layer MLP to proj_in / layers.0 / proj_out
            if layer_nb == 0:
                lowerCamelCase_ =key.replace('''layers.0''' , '''proj_in''' )
            elif layer_nb == 1:
                lowerCamelCase_ =key.replace('''layers.1''' , '''layers.0''' )
            elif layer_nb == 2:
                lowerCamelCase_ =key.replace('''layers.2''' , '''proj_out''' )
        lowerCamelCase_ =value
    lowerCamelCase_ =model_state_dict[
        '''prompt_encoder.shared_embedding.positional_embedding'''
    ]
    return model_state_dict
def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Dict , __snake_case : Tuple="ybelkada/segment-anything" ) -> Dict:
    """Download an original SAM checkpoint, convert it to HF SamModel weights,
    and sanity-check IoU scores on a reference image (points and boxes).

    NOTE(review): auto-mangled — all four parameters share the name
    `__snake_case` (duplicate argument names are a SyntaxError) and the body
    references undefined names (`model_name`, `config`, `state_dict`,
    `hf_model`, `processor`, ...). Requires CUDA as written (`.to('cuda')`).
    """
    lowerCamelCase_ =hf_hub_download(__snake_case , F'''checkpoints/{model_name}.pth''' )
    # pick the vision config matching the checkpoint size (base/large/huge)
    if "sam_vit_b" in model_name:
        lowerCamelCase_ =SamConfig()
    elif "sam_vit_l" in model_name:
        lowerCamelCase_ =SamVisionConfig(
            hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
        lowerCamelCase_ =SamConfig(
            vision_config=__snake_case , )
    elif "sam_vit_h" in model_name:
        lowerCamelCase_ =SamVisionConfig(
            hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
        lowerCamelCase_ =SamConfig(
            vision_config=__snake_case , )
    lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' )
    lowerCamelCase_ =replace_keys(__snake_case )
    lowerCamelCase_ =SamImageProcessor()
    lowerCamelCase_ =SamProcessor(image_processor=__snake_case )
    lowerCamelCase_ =SamModel(__snake_case )
    hf_model.load_state_dict(__snake_case )
    lowerCamelCase_ =hf_model.to('''cuda''' )
    # reference image + prompts used for numerical regression checks below
    lowerCamelCase_ ='''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
    lowerCamelCase_ =Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert('''RGB''' )
    lowerCamelCase_ =[[[400, 650]]]
    lowerCamelCase_ =[[1]]
    lowerCamelCase_ =processor(images=np.array(__snake_case ) , return_tensors='''pt''' ).to('''cuda''' )
    with torch.no_grad():
        lowerCamelCase_ =hf_model(**__snake_case )
    lowerCamelCase_ =output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        # regression values below were recorded from the converted sam_vit_h model
        assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8
        lowerCamelCase_ =processor(
            images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors='''pt''' ).to('''cuda''' )
        with torch.no_grad():
            lowerCamelCase_ =hf_model(**__snake_case )
        lowerCamelCase_ =output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4
        lowerCamelCase_ =((75, 275, 1725, 850),)
        lowerCamelCase_ =processor(images=np.array(__snake_case ) , input_boxes=__snake_case , return_tensors='''pt''' ).to('''cuda''' )
        with torch.no_grad():
            lowerCamelCase_ =hf_model(**__snake_case )
        lowerCamelCase_ =output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4
        # Test with 2 points and 1 image.
        lowerCamelCase_ =[[[400, 650], [800, 650]]]
        lowerCamelCase_ =[[1, 1]]
        lowerCamelCase_ =processor(
            images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors='''pt''' ).to('''cuda''' )
        with torch.no_grad():
            lowerCamelCase_ =hf_model(**__snake_case )
        lowerCamelCase_ =output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2
if __name__ == "__main__":
    # CLI entry point: parse conversion options and run the converter.
    # NOTE(review): mangled — the parser and choices list are assigned to
    # `a_`, yet `parser` / `choices` / `args` are used below; restore the
    # original names (`parser`, `choices`, `args`) before running.
    a_ : str = argparse.ArgumentParser()
    a_ : Union[str, Any] = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
    parser.add_argument(
        """--model_name""",
        default="""sam_vit_h_4b8939""",
        choices=choices,
        type=str,
        help="""Path to hf config.json of model to convert""",
    )
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model and processor to the hub after converting""",
    )
    parser.add_argument(
        """--model_hub_id""",
        default="""ybelkada/segment-anything""",
        choices=choices,
        type=str,
        help="""Path to hf config.json of model to convert""",
    )
    a_ : Union[str, Any] = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 676 |
'''simple docstring'''
def a_ ( number : int , iterations : int ) -> str:
    """Play FizzBuzz.

    Return a single space-separated string holding the FizzBuzz value for
    every integer from ``number`` up to and including ``iterations``:
    multiples of 3 become "Fizz", multiples of 5 become "Buzz", multiples of
    both become "FizzBuzz", everything else is the number itself.

    Raises:
        ValueError: if ``iterations`` is not an int, if ``number`` is not a
            positive int, or if ``iterations`` < 1.
    """
    # Bug fixes vs. the original: both parameters were named `__snake_case`
    # (duplicate argument names are a SyntaxError) and the body referenced the
    # then-undefined names `number` and `iterations`; the isinstance checks
    # also compared a value against itself instead of against `int`.
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''' )
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            '''starting number must be
            and integer and be more than 0''' )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
    out = ''''''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        # not divisible by 3 or 5: emit the number itself
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=2, lowerCAmelCase=99, lowerCAmelCase=0, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase="last", lowerCAmelCase=None, lowerCAmelCase=None, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_input_lengths
lowerCamelCase_ =use_token_type_ids
lowerCamelCase_ =use_labels
lowerCamelCase_ =gelu_activation
lowerCamelCase_ =sinusoidal_embeddings
lowerCamelCase_ =causal
lowerCamelCase_ =asm
lowerCamelCase_ =n_langs
lowerCamelCase_ =vocab_size
lowerCamelCase_ =n_special
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =num_labels
lowerCamelCase_ =num_choices
lowerCamelCase_ =summary_type
lowerCamelCase_ =use_proj
lowerCamelCase_ =scope
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =None
if self.use_input_lengths:
lowerCamelCase_ =(
ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase_ =None
if self.use_token_type_ids:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs )
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float()
lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase_ =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self ):
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =FlaubertForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_choices
lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =model(
lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase__ ( self ):
"""Unpack prepare_config_and_inputs() and build the common (config, inputs_dict) pair."""
lowerCamelCase_ =self.prepare_config_and_inputs()
# NOTE(review): this 9-way tuple unpack was garbled by renaming — every target is
# `lowerCamelCase_` and the source name `config_and_inputs` is never bound above.
# The originals were presumably (config, input_ids, token_type_ids, input_lengths,
# sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask).
(
(
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
),
) =config_and_inputs
# Only the tensors every Flaubert head accepts go into the shared inputs dict.
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
# Common-suite test class for all PyTorch Flaubert heads (model/pipeline mixins + unittest).
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
# NOTE(review): both class attributes are named `lowercase` — the second
# assignment shadows the first; originally these were presumably
# `all_model_classes` and `pipeline_model_mapping`.
lowercase : List[Any] =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase : Tuple =(
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""Return True to skip QA pipeline tests when a slow (non-Fast) tokenizer is used."""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ):
"""Extend the common input dict with zero start/end position labels for the QA head."""
lowerCamelCase_ =super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
# Dummy span labels: every answer starts and ends at position 0.
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
return inputs_dict
def lowercase__ ( self ):
"""Create the model tester and the config tester used by every test below."""
lowerCamelCase_ =FlaubertModelTester(self )
lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, emb_dim=37 )
def lowercase__ ( self ):
"""Run the generic configuration checks."""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""Shape-check the bare Flaubert model."""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase )
def lowercase__ ( self ):
"""Shape-check the LM-head variant."""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase )
def lowercase__ ( self ):
"""Shape-check the simple (SQuAD-style) QA head."""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase )
def lowercase__ ( self ):
"""Shape-check the beam-search QA head."""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase )
def lowercase__ ( self ):
"""Shape-check the sequence-classification head."""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase )
def lowercase__ ( self ):
"""Shape-check the token-classification head."""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase )
def lowercase__ ( self ):
"""Shape-check the multiple-choice head."""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""Smoke-test loading the first published checkpoint from the hub."""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@slow
@require_torch_gpu
def lowercase__ ( self ):
"""Trace each model with torch.jit, round-trip it through disk, and run the loaded graph."""
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCamelCase_ =True
lowerCamelCase_ =model_class(config=lowerCAmelCase )
lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase )
# Trace on CPU tensors so the artifact is device-independent.
lowerCamelCase_ =torch.jit.trace(
lowerCAmelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, '''traced_model.pt''' ) )
lowerCamelCase_ =torch.jit.load(os.path.join(lowerCAmelCase, '''traced_model.pt''' ), map_location=lowerCAmelCase )
loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase ), inputs_dict['''attention_mask'''].to(lowerCAmelCase ) )
@require_torch
# Integration test: compare real checkpoint activations against recorded reference values.
class __UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase__ ( self ):
"""Run flaubert_base_cased on a fixed sentence and compare a 3x3 logit slice."""
lowerCamelCase_ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
# One sequence of 11 token ids (batch size 1).
lowerCamelCase_ =torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase )[0]
lowerCamelCase_ =torch.Size((1, 11, 768) )
self.assertEqual(output.shape, lowerCAmelCase )
# Reference slice recorded from a known-good run of this checkpoint.
lowerCamelCase_ =torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=1e-4 ) )
| 676 |
'''simple docstring'''
from typing import List
import numpy as np
def a_ ( __snake_case : dict ) -> int:
    """Return the number of shards implied by the list-valued entries of a gen_kwargs dict.

    Every value that is a ``list`` is treated as a sharded data source; all such
    lists must share one length, which becomes the shard count.  Returns at
    least 1 so a kwargs dict with no list values still maps to a single shard.

    Raises:
        RuntimeError: if two list values have different lengths (ambiguous sharding).
    """
    # Fix: the obfuscated original evaluated isinstance(<dict>, <dict>) — a
    # TypeError at runtime.  The intended test is "is this value a list?".
    lists_lengths = {key: len(value) for key, value in __snake_case.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def a_ ( num_shards : int , max_num_jobs : int ) -> List[range]:
    """Partition ``num_shards`` shard indices into at most ``max_num_jobs`` contiguous ranges.

    Earlier groups receive one extra shard when the division is uneven; empty
    trailing groups are dropped, so fewer than ``max_num_jobs`` ranges may be
    returned when there are not enough shards.
    """
    # Fix: the obfuscated original declared both parameters as `__snake_case`
    # (a SyntaxError) while the body read undefined names `num_shards` and
    # `max_num_jobs`; the keyword call site below the original
    # (`_distribute_shards(num_shards=..., max_num_jobs=...)`) pins these names.
    shards_indices_per_group: List[range] = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shards_indices_per_group.append(range(start, start + num_shards_to_add))
    return shards_indices_per_group
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Shard count implied by the list-valued entries of ``gen_kwargs`` (minimum 1)."""
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    return max(1, max(lists_lengths.values(), default=0))


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Contiguous index ranges assigning ``num_shards`` shards to at most ``max_num_jobs`` jobs."""
    shards_indices_per_group: List[range] = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shards_indices_per_group.append(range(start, start + num_shards_to_add))
    return shards_indices_per_group


def a_ ( gen_kwargs : dict , max_num_jobs : int ) -> List[dict]:
    """Split ``gen_kwargs`` into per-job kwargs dicts, slicing every list value.

    Non-list values are shared as-is by every job's dict.  Fixes the obfuscated
    original, whose duplicated ``__snake_case`` parameters were a SyntaxError
    and whose two call targets were undefined in this file (the obfuscation
    renamed every function to ``a_``); the helpers above restore those targets.
    """
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
    return [
        {
            key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
            if isinstance(value, list)
            else value
            for key, value in gen_kwargs.items()
        }
        for group_idx in range(len(shard_indices_per_group))
    ]
def a_ ( gen_kwargs_list : List[dict] ) -> dict:
    """Merge per-job kwargs dicts back into one.

    List values are concatenated in job order; non-list values are taken from
    the first dict (all jobs share them).  Fixes two obfuscation defects: the
    parameter was renamed ``__snake_case`` while the body still read
    ``gen_kwargs_list`` (NameError), and the list check called ``isinstance``
    against a non-type.
    """
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def a_ ( rng : np.random.Generator , gen_kwargs : dict ) -> dict:
    """Return a copy of ``gen_kwargs`` with every list value shuffled by ``rng``.

    Lists of equal length share one permutation, so parallel data-source lists
    (e.g. file paths and their per-file metadata) stay aligned after shuffling.
    Fixes the obfuscated original, whose duplicated ``__snake_case`` parameters
    were a SyntaxError and whose comprehensions called ``len``/``range``/
    ``isinstance`` on the wrong (dict) object.
    """
    # One shared permutation per distinct list length.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
| 676 | 1 |
'''simple docstring'''
import requests
a_ : int = """""" # <-- Put your OpenWeatherMap appid here!
a_ : List[Any] = """https://api.openweathermap.org/data/2.5/"""
def a_ ( location : str = "Chicago" , appid : str = APPID ) -> dict:
    """Fetch current weather for ``location`` from OpenWeatherMap and return the parsed JSON.

    Fixes the obfuscated signature (both parameters were named ``__snake_case``,
    a SyntaxError) and replaces the fragile ``params=locals()`` idiom — which
    silently depended on the (lost) parameter names — with an explicit dict.
    """
    # NOTE(review): the `weather` endpoint takes its place query as `q` — the
    # original parameter name is unrecoverable here; confirm against the API docs.
    return requests.get(URL_BASE + '''weather''' , params={"q": location, "appid": appid} ).json()
def a_ ( location : str = "Kolkata, India" , appid : str = APPID ) -> dict:
    """Fetch the multi-day forecast for ``location`` from OpenWeatherMap as parsed JSON.

    Fixes the duplicated ``__snake_case`` parameters (a SyntaxError) and makes
    the query parameters explicit instead of using ``params=locals()``.
    """
    # NOTE(review): `q` is the place-query parameter of the `forecast` endpoint —
    # confirm against the OpenWeatherMap API docs.
    return requests.get(URL_BASE + '''forecast''' , params={"q": location, "appid": appid} ).json()
def a_ ( lat : float = 55.68 , lon : float = 12.57 , appid : str = APPID ) -> dict:
    """Fetch the One Call weather bundle for a lat/lon pair as parsed JSON.

    Fixes the duplicated ``__snake_case`` parameters (a SyntaxError) and makes
    the query parameters explicit instead of using ``params=locals()``.
    The defaults (55.68, 12.57) point at Copenhagen.
    """
    return requests.get(URL_BASE + '''onecall''' , params={"lat": lat, "lon": lon, "appid": appid} ).json()
# Interactive driver: prompt for locations until an empty line is entered.
if __name__ == "__main__":
from pprint import pprint
while True:
# NOTE(review): the obfuscation renamed the assignment target to `a_` while the
# test below still reads `location` (NameError), and `current_weather` is
# undefined in this file (all functions were renamed to `a_`).
a_ : Tuple = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
| 676 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
a_ : int = logging.getLogger(__name__)
def a_ ( ) -> None:
    """Pre-tokenize a raw text dump into id sequences and pickle them.

    Reads one document per line from ``--file_path``, encodes each line as
    ``<bos> text <sep>`` with the selected tokenizer, and dumps the resulting
    list of id arrays to ``<dump_file>.<tokenizer_name>.pickle`` so training
    does not have to re-tokenize on every run.

    Fix: the obfuscation collapsed every local to ``lowerCamelCase_`` — as
    written, ``rslt`` was never defined and ``iter += 1`` incremented the
    builtin (TypeError).  Coherent local names are restored from the
    surrounding reads (``args``, ``tokenizer``, ``bos``/``sep``, ``rslt`` …).
    """
    parser = argparse.ArgumentParser(
        description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
    parser.add_argument('''--file_path''' , type=str , default='''data/dump.txt''' , help='''The path to the data.''' )
    parser.add_argument('''--tokenizer_type''' , type=str , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
    parser.add_argument('''--tokenizer_name''' , type=str , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
    parser.add_argument('''--dump_file''' , type=str , default='''data/dump''' , help='''The dump file prefix.''' )
    args = parser.parse_args()
    logger.info(f'''Loading Tokenizer ({args.tokenizer_name})''' )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['''cls_token''']  # `[CLS]`
        sep = tokenizer.special_tokens_map['''sep_token''']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['''cls_token''']  # `<s>`
        sep = tokenizer.special_tokens_map['''sep_token''']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['''bos_token''']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['''eos_token''']  # `<|endoftext|>`
    logger.info(f'''Loading text from {args.file_path}''' )
    with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
        data = fp.readlines()
    logger.info('''Start encoding''' )
    logger.info(f'''{len(data )} examples to process.''' )
    rslt = []
    count = 0
    interval = 1_0000
    start = time.time()
    for text in data:
        # Wrap each document with the tokenizer's BOS/SEP markers explicitly,
        # so special tokens are not added a second time by encode().
        text = f'''{bos} {text.strip()} {sep}'''
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f'''{count} examples processed. - {(end-start):.2f}s/{interval}expl''' )
            start = time.time()
    logger.info('''Finished binarization''' )
    logger.info(f'''{len(data )} examples processed.''' )
    dp_file = f'''{args.dump_file}.{args.tokenizer_name}.pickle'''
    vocab_size = tokenizer.vocab_size
    # Store 16-bit ids when the vocab fits, halving the pickle size.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f'''Dump to {dp_file}''' )
    with open(dp_file , '''wb''' ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
    # Fix: the obfuscation renamed the entry point to `a_` but left this call
    # to the now-undefined `main`, so running the script raised NameError.
    a_()
| 676 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : Tuple = logging.get_logger(__name__)
a_ : Tuple = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
# Configuration class for the Table Transformer (DETR-style detection model).
class __UpperCamelCase ( lowerCamelCase__ ):
# NOTE(review): all three class attributes share the name `lowercase` (last one
# wins); originally these were presumably `model_type`, `keys_to_ignore_at_inference`
# and `attribute_map`.
lowercase : int ='table-transformer'
lowercase : Union[str, Any] =['past_key_values']
lowercase : Optional[int] ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self, lowerCAmelCase=True, lowerCAmelCase=None, lowerCAmelCase=3, lowerCAmelCase=100, lowerCAmelCase=6, lowerCAmelCase=2_048, lowerCAmelCase=8, lowerCAmelCase=6, lowerCAmelCase=2_048, lowerCAmelCase=8, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=True, lowerCAmelCase="relu", lowerCAmelCase=256, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1.0, lowerCAmelCase=False, lowerCAmelCase="sine", lowerCAmelCase="resnet50", lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=1, lowerCAmelCase=5, lowerCAmelCase=2, lowerCAmelCase=1, lowerCAmelCase=1, lowerCAmelCase=5, lowerCAmelCase=2, lowerCAmelCase=0.1, **lowerCAmelCase, ):
"""Build the config; resolves the vision backbone (timm or HF) and stores all hyperparameters."""
# NOTE(review): the parameter list was collapsed to repeated `lowerCAmelCase`
# (a SyntaxError) — the body reads the intended names (use_timm_backbone,
# backbone_config, …), which document the original order.
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowerCamelCase_ =CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(lowerCAmelCase, lowerCAmelCase ):
# Re-hydrate a dict-form backbone config through its registered config class.
lowerCamelCase_ =backbone_config.get('''model_type''' )
lowerCamelCase_ =CONFIG_MAPPING[backbone_model_type]
lowerCamelCase_ =config_class.from_dict(lowerCAmelCase )
# set timm attributes to None
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =None, None, None
lowerCamelCase_ =use_timm_backbone
lowerCamelCase_ =backbone_config
lowerCamelCase_ =num_channels
lowerCamelCase_ =num_queries
lowerCamelCase_ =d_model
lowerCamelCase_ =encoder_ffn_dim
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =encoder_attention_heads
lowerCamelCase_ =decoder_ffn_dim
lowerCamelCase_ =decoder_layers
lowerCamelCase_ =decoder_attention_heads
lowerCamelCase_ =dropout
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =activation_dropout
lowerCamelCase_ =activation_function
lowerCamelCase_ =init_std
lowerCamelCase_ =init_xavier_std
lowerCamelCase_ =encoder_layerdrop
lowerCamelCase_ =decoder_layerdrop
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =auxiliary_loss
lowerCamelCase_ =position_embedding_type
lowerCamelCase_ =backbone
lowerCamelCase_ =use_pretrained_backbone
lowerCamelCase_ =dilation
# Hungarian matcher
lowerCamelCase_ =class_cost
lowerCamelCase_ =bbox_cost
lowerCamelCase_ =giou_cost
# Loss coefficients
lowerCamelCase_ =mask_loss_coefficient
lowerCamelCase_ =dice_loss_coefficient
lowerCamelCase_ =bbox_loss_coefficient
lowerCamelCase_ =giou_loss_coefficient
lowerCamelCase_ =eos_coefficient
super().__init__(is_encoder_decoder=lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""Alias: number of attention heads (encoder side)."""
return self.encoder_attention_heads
@property
def lowercase__ ( self ):
"""Alias: model hidden size."""
return self.d_model
# ONNX export configuration for the Table Transformer model.
class __UpperCamelCase ( lowerCamelCase__ ):
    # Minimum tooling version this export configuration targets.
    lowercase : Dict =version.parse('1.11' )
    # NOTE: the three properties below share one name (renaming damage) — only
    # the last definition survives on the class.
    @property
    def lowercase__ ( self ):
        """ONNX input spec: dynamic axes for the pixel tensor and its padding mask."""
        pixel_values_axes = {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}
        pixel_mask_axes = {0: '''batch'''}
        return OrderedDict(
            (('''pixel_values''', pixel_values_axes), ('''pixel_mask''', pixel_mask_axes)) )
    @property
    def lowercase__ ( self ):
        """Absolute tolerance used when validating exported model outputs."""
        return 1e-5
    @property
    def lowercase__ ( self ):
        """Default ONNX opset / sample count constant exposed by this config."""
        return 12
| 676 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
a_ : int = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
# Configuration class for the MVP encoder-decoder model.
class __UpperCamelCase ( lowerCamelCase__ ):
# NOTE(review): the three class attributes all bind the name `lowercase` (last
# wins); originally presumably `model_type`, `keys_to_ignore_at_inference`,
# `attribute_map`.
lowercase : List[str] ='mvp'
lowercase : List[str] =['past_key_values']
lowercase : Dict ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self, lowerCAmelCase=50_267, lowerCAmelCase=1_024, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase="gelu", lowerCAmelCase=1_024, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase=True, lowerCAmelCase=2, lowerCAmelCase=2, lowerCAmelCase=False, lowerCAmelCase=100, lowerCAmelCase=800, **lowerCAmelCase, ):
"""Store all MVP hyperparameters and forward the special-token ids to the base config."""
# NOTE(review): the signature's parameter names were collapsed to repeated
# `lowerCAmelCase` (a SyntaxError) — the intended names are the ones read below
# (vocab_size, max_position_embeddings, d_model, ...).
lowerCamelCase_ =vocab_size
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =d_model
lowerCamelCase_ =encoder_ffn_dim
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =encoder_attention_heads
lowerCamelCase_ =decoder_ffn_dim
lowerCamelCase_ =decoder_layers
lowerCamelCase_ =decoder_attention_heads
lowerCamelCase_ =dropout
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =activation_dropout
lowerCamelCase_ =activation_function
lowerCamelCase_ =init_std
lowerCamelCase_ =encoder_layerdrop
lowerCamelCase_ =decoder_layerdrop
lowerCamelCase_ =classifier_dropout
lowerCamelCase_ =use_cache
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =scale_embedding  # scale factor will be sqrt(d_model) if True
lowerCamelCase_ =use_prompt
lowerCamelCase_ =prompt_length
lowerCamelCase_ =prompt_mid_dim
super().__init__(
pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, is_encoder_decoder=lowerCAmelCase, decoder_start_token_id=lowerCAmelCase, forced_eos_token_id=lowerCAmelCase, **lowerCAmelCase, )
# Backward compatibility: honour the legacy `force_bos_token_to_be_generated` kwarg.
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''', lowerCAmelCase ):
lowerCamelCase_ =self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'''The config can simply be saved and uploaded again to be fixed.''' )
| 676 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Output container for a Karras-Ve scheduler step: previous sample, the ODE
# derivative, and (optionally) the predicted denoised sample.
class __UpperCamelCase ( lowerCamelCase__ ):
# NOTE(review): all three fields are named `lowercase` (renaming damage) — in a
# dataclass only the last annotation survives, so this effectively declares a
# single optional-tensor field.  Originals were presumably prev_sample,
# derivative, pred_original_sample.
lowercase : torch.FloatTensor
lowercase : torch.FloatTensor
lowercase : Optional[torch.FloatTensor] =None
# Stochastic sampler from Karras et al. (Elucidating the Design Space of
# Diffusion-Based Generative Models): churn-based noise injection plus a
# Heun-style first/second-order update.
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
# Scheduler compatibility/order marker.
lowercase : Any =2
@register_to_config
def __init__( self, lowerCAmelCase = 0.0_2, lowerCAmelCase = 100, lowerCAmelCase = 1.0_0_7, lowerCAmelCase = 80, lowerCAmelCase = 0.0_5, lowerCAmelCase = 50, ):
"""Record sigma_max and initialize the mutable schedule state to unset."""
# NOTE(review): repeated `lowerCAmelCase` parameters — renaming damage; the
# defaults match (sigma_min, sigma_max?, s_noise, s_churn?, s_min, s_max) —
# order unrecoverable here, confirm against the upstream scheduler.
lowerCamelCase_ =sigma_max
# setable values
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None  # sigma(t_i)
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""Identity: this scheduler requires no input scaling."""
return sample
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""Build the reversed timestep grid and the geometric sigma schedule on the target device."""
lowerCamelCase_ =num_inference_steps
lowerCamelCase_ =np.arange(0, self.num_inference_steps )[::-1].copy()
lowerCamelCase_ =torch.from_numpy(lowerCAmelCase ).to(lowerCAmelCase )
# Geometric interpolation from sigma_max down to sigma_min over the steps.
lowerCamelCase_ =[
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
lowerCamelCase_ =torch.tensor(lowerCAmelCase, dtype=torch.floataa, device=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None ):
"""Inject churn noise: raise sigma to sigma_hat and add matched Gaussian noise to the sample."""
# Churn only inside the configured [s_min, s_max] sigma band.
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase_ =min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1 )
else:
lowerCamelCase_ =0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase_ =self.config.s_noise * randn_tensor(sample.shape, generator=lowerCAmelCase ).to(sample.device )
lowerCamelCase_ =sigma + gamma * sigma
lowerCamelCase_ =sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = True, ):
"""First-order (Euler) step from sigma_hat to sigma_prev."""
lowerCamelCase_ =sample_hat + sigma_hat * model_output
lowerCamelCase_ =(sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase_ =sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowerCAmelCase, derivative=lowerCAmelCase, pred_original_sample=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = True, ):
"""Second-order (Heun) correction averaging the two derivative estimates."""
lowerCamelCase_ =sample_prev + sigma_prev * model_output
lowerCamelCase_ =(sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase_ =sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowerCAmelCase, derivative=lowerCAmelCase, pred_original_sample=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""Adding noise for training is not supported by this scheduler."""
raise NotImplementedError()
| 676 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : str = {"""vocab_file""": """spiece.model"""}
a_ : Optional[int] = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
a_ : List[Any] = {"""bert_for_seq_generation""": 5_12}
# SentencePiece tokenizer for the BERT-for-seq-generation checkpoint.
class __UpperCamelCase ( lowerCamelCase__ ):
# NOTE(review): all five class attributes bind the name `lowercase`/`lowercase`
# variants — renaming damage; originals were presumably vocab_files_names,
# pretrained_vocab_files_map, max_model_input_sizes, prefix_tokens,
# model_input_names.
lowercase : Optional[int] =VOCAB_FILES_NAMES
lowercase : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[int] =[]
lowercase : str =['input_ids', 'attention_mask']
def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<::::>", lowerCAmelCase = None, **lowerCAmelCase, ):
"""Load the SentencePiece model from `vocab_file` and register the special tokens."""
# NOTE(review): repeated `lowerCAmelCase` parameters — renaming damage
# (vocab_file, bos/eos/unk/pad/sep tokens, sp_model_kwargs as read below).
lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, pad_token=lowerCAmelCase, sep_token=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, )
lowerCamelCase_ =vocab_file
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase )
@property
def lowercase__ ( self ):
"""Vocabulary size of the underlying SentencePiece model."""
return self.sp_model.get_piece_size()
def lowercase__ ( self ):
"""Return token->id mapping including user-added tokens."""
lowerCamelCase_ ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""Drop the (unpicklable) SentencePiece processor before pickling."""
lowerCamelCase_ =self.__dict__.copy()
lowerCamelCase_ =None
return state
def __setstate__( self, lowerCAmelCase ):
"""Restore state and reload the SentencePiece model from `vocab_file`."""
lowerCamelCase_ =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCamelCase_ ={}
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self, lowerCAmelCase ):
"""Tokenize text into SentencePiece subword strings."""
return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""Convert a subword string to its vocabulary id."""
return self.sp_model.piece_to_id(lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""Convert a vocabulary id back to its subword string."""
lowerCamelCase_ =self.sp_model.IdToPiece(lowerCAmelCase )
return token
def lowercase__ ( self, lowerCAmelCase ):
"""Join subword tokens back into text, decoding around special tokens."""
lowerCamelCase_ =[]
lowerCamelCase_ =''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase ) + token
lowerCamelCase_ =[]
else:
current_sub_tokens.append(lowerCAmelCase )
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""Write the SentencePiece model file into `save_directory`; returns the saved path."""
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ =os.path.join(
lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
# Copy the original file when it exists on disk, otherwise serialize the
# in-memory model proto.
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase, '''wb''' ) as fi:
lowerCamelCase_ =self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
| 676 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
# Common-suite test class for all TensorFlow MobileBERT heads.
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
# NOTE(review): the four class attributes all bind `lowercase` (renaming
# damage); originally presumably all_model_classes, pipeline_model_mapping,
# test_head_masking, test_onnx.
lowercase : int =(
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase : Any =(
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase : Union[str, Any] =False
lowercase : Optional[Any] =False
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ):
"""Add zero next-sentence labels for pretraining-mapped model classes when labels are requested."""
lowerCamelCase_ =super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase )
if return_labels:
if model_class in get_values(lowerCAmelCase ):
lowerCamelCase_ =tf.zeros(self.model_tester.batch_size, dtype=tf.intaa )
return inputs_dict
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=32, lowerCAmelCase=32, lowerCAmelCase=2, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=16, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase=None, ):
"""Store all tester hyperparameters (tiny MobileBERT dims for fast shape tests)."""
# NOTE(review): repeated `lowerCAmelCase` parameters — renaming damage; the
# intended names are the ones assigned below (parent, batch_size, seq_length, ...).
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_input_mask
lowerCamelCase_ =use_token_type_ids
lowerCamelCase_ =use_labels
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_act
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =num_labels
lowerCamelCase_ =num_choices
lowerCamelCase_ =scope
lowerCamelCase_ =embedding_size
def lowercase__ ( self ):
"""Build random input tensors, optional mask/type-ids/labels, and a tiny MobileBertConfig."""
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase_ =None
if self.use_input_mask:
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =None
if self.use_token_type_ids:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase_ =MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""Run the bare TFMobileBertModel with dict, list and positional inputs; check output shapes."""
# NOTE(review): duplicated `lowerCAmelCase` parameters — renaming damage (SyntaxError as written).
lowerCamelCase_ =TFMobileBertModel(config=lowerCAmelCase )
lowerCamelCase_ ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =[input_ids, input_mask]
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""Check TFMobileBertForMaskedLM logits have shape (batch, seq, vocab)."""
lowerCamelCase_ =TFMobileBertForMaskedLM(config=lowerCAmelCase )
lowerCamelCase_ ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =TFMobileBertForNextSentencePrediction(config=lowerCAmelCase )
lowerCamelCase_ ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =TFMobileBertForPreTraining(config=lowerCAmelCase )
lowerCamelCase_ ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =TFMobileBertForSequenceClassification(config=lowerCAmelCase )
lowerCamelCase_ ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.num_choices
lowerCamelCase_ =TFMobileBertForMultipleChoice(config=lowerCAmelCase )
lowerCamelCase_ =tf.tile(tf.expand_dims(lowerCAmelCase, 1 ), (1, self.num_choices, 1) )
lowerCamelCase_ =tf.tile(tf.expand_dims(lowerCAmelCase, 1 ), (1, self.num_choices, 1) )
lowerCamelCase_ =tf.tile(tf.expand_dims(lowerCAmelCase, 1 ), (1, self.num_choices, 1) )
lowerCamelCase_ ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =TFMobileBertForTokenClassification(config=lowerCAmelCase )
lowerCamelCase_ ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =TFMobileBertForQuestionAnswering(config=lowerCAmelCase )
lowerCamelCase_ ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.prepare_config_and_inputs()
(
(
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
),
) =config_and_inputs
lowerCamelCase_ ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =TFMobileBertModelTest.TFMobileBertModelTester(self )
lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, hidden_size=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
lowerCamelCase_ =TFMobileBertModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_tf
class __UpperCamelCase(unittest.TestCase):
    """Slow integration test against the real google/mobilebert-uncased checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        """Compare pretraining-head logits with recorded reference values."""
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        # Reference logits for the first 3 positions x first 3 vocabulary entries.
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 676 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at ``x``.

    ``poly`` holds the coefficients in ascending order of power:
    poly[i] is the coefficient of x**i. Returns 0 for an empty sequence.
    (Renamed from the placeholder ``a_`` to match the call in ``__main__``.)
    """
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at ``x`` using Horner's scheme (O(n), no powers).

    Same coefficient convention as ``evaluate_poly``: poly[i] multiplies x**i.
    (Renamed from the placeholder ``a_`` to match the call in ``__main__``.)
    """
    result = 0.0
    # Fold from the highest-order coefficient down: r = r*x + c.
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    # Demo polynomial p(x) = 5x^2 + 9.3x^3 + 7x^4 evaluated at x = 10.
    # NOTE(review): as written this block raises NameError — the constants are
    # bound to `a_` (not `poly`/`x`) and both function defs above are also named
    # `a_`, so `evaluate_poly` and `horner` are undefined. Restore the names.
    a_ : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0)
    a_ : Tuple = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 676 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
# Fail fast on fairseq versions whose checkpoint layout this converter
# does not understand (supported range: >= 0.12.2 and < 2).
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
    raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
    raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
# NOTE(review): the three module constants below were all renamed to `a_`, so
# each assignment shadows the previous one; later code references
# SAMPLE_LANGUAGE, which is therefore undefined. Restore the original names
# (logger / sample text / sample language code).
a_ : int = logging.get_logger(__name__)
a_ : str = """Hello, World!"""
a_ : Optional[int] = """en_XX"""
# NOTE(review): this converter is not runnable as written — the def reuses the
# parameter name `__snake_case` three times (a SyntaxError), and most
# assignments below bind to the placeholder `lowerCamelCase_` instead of their
# original targets (evidently the `model.roberta.*` weight attributes and the
# locals `xmod`, `config`, `model`, ...). The structure is annotated here;
# the real names must be restored before use. `__main__` also calls this
# function as `convert_xmod_checkpoint_to_pytorch`, not `a_`.
def a_ ( __snake_case : str , __snake_case : str , __snake_case : bool ) -> Tuple:
    """Convert a fairseq X-MOD checkpoint into a HuggingFace Xmod model and save it.

    Loads the fairseq model, mirrors its hyper-parameters into an XmodConfig,
    copies every weight tensor layer by layer, verifies both models produce the
    same output on a sample sentence, and writes the result to disk.
    """
    lowerCamelCase_ =Path('''data_bin''' )
    lowerCamelCase_ =FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(__snake_case ).parent ) , checkpoint_file=Path(__snake_case ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(__snake_case ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(__snake_case ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
    xmod.eval() # disable dropout
    print(__snake_case )
    lowerCamelCase_ =xmod.model.encoder.sentence_encoder
    # Mirror the fairseq encoder hyper-parameters into a HF XmodConfig.
    lowerCamelCase_ =XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        # Number of classes is taken from the fairseq MNLI classification head.
        lowerCamelCase_ =xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our X-MOD config:''' , __snake_case )
    lowerCamelCase_ =XmodForSequenceClassification(__snake_case ) if classification_head else XmodForMaskedLM(__snake_case )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    lowerCamelCase_ =xmod_sent_encoder.embed_tokens.weight
    lowerCamelCase_ =xmod_sent_encoder.embed_positions.weight
    lowerCamelCase_ =torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
    lowerCamelCase_ =xmod_sent_encoder.layernorm_embedding.weight
    lowerCamelCase_ =xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        lowerCamelCase_ =model.roberta.encoder.layer[i]
        lowerCamelCase_ =xmod_sent_encoder.layers[i]
        # self attention
        lowerCamelCase_ =layer.attention.self
        # All three projections must be square (hidden x hidden) before copying.
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        ):
            raise AssertionError('''Dimensions of self-attention weights do not match.''' )
        lowerCamelCase_ =xmod_layer.self_attn.q_proj.weight
        lowerCamelCase_ =xmod_layer.self_attn.q_proj.bias
        lowerCamelCase_ =xmod_layer.self_attn.k_proj.weight
        lowerCamelCase_ =xmod_layer.self_attn.k_proj.bias
        lowerCamelCase_ =xmod_layer.self_attn.v_proj.weight
        lowerCamelCase_ =xmod_layer.self_attn.v_proj.bias
        # self-attention output
        lowerCamelCase_ =layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
        lowerCamelCase_ =xmod_layer.self_attn.out_proj.weight
        lowerCamelCase_ =xmod_layer.self_attn.out_proj.bias
        lowerCamelCase_ =xmod_layer.self_attn_layer_norm.weight
        lowerCamelCase_ =xmod_layer.self_attn_layer_norm.bias
        # intermediate
        lowerCamelCase_ =layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of intermediate weights do not match.''' )
        lowerCamelCase_ =xmod_layer.fca.weight
        lowerCamelCase_ =xmod_layer.fca.bias
        # output
        lowerCamelCase_ =layer.output
        if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
        lowerCamelCase_ =xmod_layer.fca.weight
        lowerCamelCase_ =xmod_layer.fca.bias
        lowerCamelCase_ =xmod_layer.final_layer_norm.weight
        lowerCamelCase_ =xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            lowerCamelCase_ =xmod_layer.adapter_layer_norm.weight
            lowerCamelCase_ =xmod_layer.adapter_layer_norm.bias
        # Both models must expose adapters for exactly the same language codes.
        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError('''Lists of language adapters do not match.''' )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            lowerCamelCase_ =bert_output.adapter_modules[lang_code]
            lowerCamelCase_ =xmod_layer.adapter_modules[lang_code]
            lowerCamelCase_ =from_adapter.fca.weight
            lowerCamelCase_ =from_adapter.fca.bias
            lowerCamelCase_ =from_adapter.fca.weight
            lowerCamelCase_ =from_adapter.fca.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        lowerCamelCase_ =xmod_sent_encoder.layer_norm.weight
        lowerCamelCase_ =xmod_sent_encoder.layer_norm.bias
    if classification_head:
        lowerCamelCase_ =xmod.model.classification_heads['''mnli'''].dense.weight
        lowerCamelCase_ =xmod.model.classification_heads['''mnli'''].dense.bias
        lowerCamelCase_ =xmod.model.classification_heads['''mnli'''].out_proj.weight
        lowerCamelCase_ =xmod.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        lowerCamelCase_ =xmod.model.encoder.lm_head.dense.weight
        lowerCamelCase_ =xmod.model.encoder.lm_head.dense.bias
        lowerCamelCase_ =xmod.model.encoder.lm_head.layer_norm.weight
        lowerCamelCase_ =xmod.model.encoder.lm_head.layer_norm.bias
        lowerCamelCase_ =xmod.model.encoder.lm_head.weight
        lowerCamelCase_ =xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    lowerCamelCase_ =xmod.encode(__snake_case ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(__snake_case )
    lowerCamelCase_ =model(__snake_case )[0]
    if classification_head:
        lowerCamelCase_ =xmod.model.classification_heads['''mnli'''](xmod.extract_features(__snake_case ) )
    else:
        lowerCamelCase_ =xmod.model(__snake_case , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    lowerCamelCase_ =torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
    lowerCamelCase_ =torch.allclose(__snake_case , __snake_case , atol=1e-3 )
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
    if not success:
        raise Exception('''Something went wRoNg''' )
    Path(__snake_case ).mkdir(parents=__snake_case , exist_ok=__snake_case )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__snake_case )
if __name__ == "__main__":
    # NOTE(review): the parser and parsed args were both renamed to `a_`, so the
    # later references to `parser`, `args`, and `convert_xmod_checkpoint_to_pytorch`
    # (the function above is currently named `a_`) are all undefined as written.
    a_ : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
    )
    a_ : Union[str, Any] = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 676 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCamelCase ( lowerCamelCase__ ):
    """Processor pairing a CLIP image processor with an XLM-RoBERTa tokenizer.

    ProcessorMixin-style: the class attributes below declare the components
    that the mixin constructor wires up and serializes.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Legacy name kept for backward compatibility; scheduled for removal.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        When both are given, the pixel values are merged into the text encoding.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, de-duplicated, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 676 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for conversion progress messages.
a_ : int = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
) -> None:
    """Copy one timm LeViT checkpoint into the HF model and optionally save it.

    Args:
        hidden_sizes: first-stage hidden size, selects which timm model to load.
        name: checkpoint name, e.g. "levit-128S" (trailing "S" picks levit_128s).
        config: HF LevitConfig matching the timm architecture.
        save_directory: directory under which the converted checkpoint is written.
        push_to_hub: when True, save the model and image processor locally.

    (Renamed from the placeholder ``a_`` to match the calls in
    ``convert_weights_and_push``.)
    """
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        # The two state dicts enumerate the same tensors in the same order,
        # so weights are copied positionally under the HF key names.
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        # Sanity check: both models must agree on a random input.
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named LeViT checkpoint, or all of them when no name is given.

    Downloads the ImageNet-1k label mapping, builds a LevitConfig per variant,
    and delegates the actual weight copy to ``convert_weight_and_push``.
    (Renamed from the placeholder ``a_`` to match the call in ``__main__``.)
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    # Partial that bakes the label mapping into every config below.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }
    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    # NOTE(review): `parser`, `args` and `pytorch_dump_folder_path` were all
    # renamed to `a_` above, and the function defined earlier is named `a_`
    # rather than `convert_weights_and_push` — every later reference here is
    # undefined as written. Restore the names before running.
    a_ : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""levit-dump-folder/""",
        type=Path,
        required=False,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    a_ : Dict = parser.parse_args()
    a_ : Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 676 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
# Module-level logger (not referenced in the code visible below).
a_ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
    # Image-to-text pipeline around vision-to-sequence models (GIT, pix2struct,
    # vision-encoder-decoder).
    # NOTE(review): a bulk rename left this class non-runnable — several defs
    # reuse the parameter name `lowerCAmelCase` (a SyntaxError) and many
    # assignments bind to the placeholder `lowerCamelCase_` instead of their
    # original locals (preprocess_params, model_inputs, records, ...). The
    # original names must be restored before this code can execute.
    def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Validate backends and restrict to vision-2-seq model classes."""
        super().__init__(*lowerCAmelCase, **lowerCAmelCase )
        requires_backends(self, '''vision''' )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None ):
        """Split call-time kwargs into (preprocess, forward, postprocess) dicts."""
        lowerCamelCase_ ={}
        lowerCamelCase_ ={}
        if prompt is not None:
            lowerCamelCase_ =prompt
        if generate_kwargs is not None:
            lowerCamelCase_ =generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                lowerCamelCase_ ={}
            # Reject the ambiguous case where the limit is given both ways.
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
                    ''' please use only one''' )
            lowerCamelCase_ =max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self, lowerCAmelCase, **lowerCAmelCase ):
        """Run the pipeline on one image (or a batch of images)."""
        return super().__call__(lowerCAmelCase, **lowerCAmelCase )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
        """Preprocess: load the image and build model inputs, branching per model type."""
        lowerCamelCase_ =load_image(lowerCAmelCase )
        if prompt is not None:
            if not isinstance(lowerCAmelCase, lowerCAmelCase ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(lowerCAmelCase )} - but expected a single string. '''
                    '''Note also that one single text can be provided for conditional image to text generation.''' )
            lowerCamelCase_ =self.model.config.model_type
            if model_type == "git":
                # GIT: prepend the CLS token to the tokenized prompt.
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
                lowerCamelCase_ =self.tokenizer(text=lowerCAmelCase, add_special_tokens=lowerCAmelCase ).input_ids
                lowerCamelCase_ =[self.tokenizer.cls_token_id] + input_ids
                lowerCamelCase_ =torch.tensor(lowerCAmelCase ).unsqueeze(0 )
                model_inputs.update({'''input_ids''': input_ids} )
            elif model_type == "pix2struct":
                # pix2struct renders the prompt into the image as header text.
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, header_text=lowerCAmelCase, return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
                lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework )
                model_inputs.update(lowerCAmelCase )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            lowerCamelCase_ =None
        return model_inputs
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
        """Forward: call generate() on the prepared inputs."""
        # GIT without a prompt produces input_ids of [None]; normalize to None.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['''input_ids'''], lowerCAmelCase )
            and all(x is None for x in model_inputs['''input_ids'''] )
        ):
            lowerCamelCase_ =None
        if generate_kwargs is None:
            lowerCamelCase_ ={}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        lowerCamelCase_ =model_inputs.pop(self.model.main_input_name )
        lowerCamelCase_ =self.model.generate(lowerCAmelCase, **lowerCAmelCase, **lowerCAmelCase )
        return model_outputs
    def lowercase__ ( self, lowerCAmelCase ):
        """Postprocess: decode each generated sequence into {'generated_text': ...}."""
        lowerCamelCase_ =[]
        for output_ids in model_outputs:
            lowerCamelCase_ ={
                '''generated_text''': self.tokenizer.decode(
                    lowerCAmelCase, skip_special_tokens=lowerCAmelCase, )
            }
            records.append(lowerCAmelCase )
        return records
| 676 | 1 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
# Slack client authenticated with the CI bot token from the environment.
a_ : List[str] = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def a_(test_results: str):
    """Parse a pytest summary line into (failed, passed, time_spent).

    Counts every "N failed" / "N passed" token pair in the string. When the
    output is short enough pytest surrounds it with '=' signs, in which case
    the duration is the second-to-last token; otherwise it is the last one.
    """
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def a_(failures_short_lines: str):
    """Map each doctest file in a `failures_short` report to its first error line.

    A line matching `_ [doctest]` opens an error section and names the file
    (third whitespace token); the first subsequent line that does not start
    with a digit (i.e. is not a numbered source line) is taken as the error.
    """
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class __UpperCamelCase :
def __init__( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =title
lowerCamelCase_ =doc_test_results['''time_spent'''].split(''',''' )[0]
lowerCamelCase_ =doc_test_results['''success''']
lowerCamelCase_ =doc_test_results['''failures''']
lowerCamelCase_ =self.n_success + self.n_failures
# Failures and success of the modeling tests
lowerCamelCase_ =doc_test_results
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =[self._time_spent]
lowerCamelCase_ =0
for time in time_spent:
lowerCamelCase_ =time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(lowerCAmelCase ) == 1:
lowerCamelCase_ =[0, 0, time_parts[0]]
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_600 + minutes * 60 + seconds
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
return f'''{int(lowerCAmelCase )}h{int(lowerCAmelCase )}m{int(lowerCAmelCase )}s'''
@property
def lowercase__ ( self ):
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def lowercase__ ( self ):
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def lowercase__ ( self ):
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =40
lowerCamelCase_ ={k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(lowerCAmelCase, lowerCAmelCase )}
lowerCamelCase_ =''''''
for category, failures in category_failures.items():
if len(lowerCAmelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(lowerCAmelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(lowerCAmelCase )
    @staticmethod
    def lowercase__ ( ):
        """Post a generic 'tests failed to run' message to the daily Slack channel."""
        # Static payload shown when the test run itself could not complete.
        lowerCamelCase_ =[
            {
                '''type''': '''section''',
                '''text''': {
                    '''type''': '''plain_text''',
                    '''text''': '''There was an issue running the tests.''',
                },
                '''accessory''': {
                    '''type''': '''button''',
                    '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
                    '''url''': f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
                },
            }
        ]
        print('''Sending the following payload''' )
        # NOTE(review): `lowerCAmelCase` is unbound in this staticmethod — the
        # original arguments (presumably the payload defined above) were lost
        # in a bulk rename; restore them before use.
        print(json.dumps({'''blocks''': json.loads(lowerCAmelCase )} ) )
        client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], text='''There was an issue running the tests.''', blocks=lowerCAmelCase, )
def lowercase__ ( self ):
"""simple docstring"""
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
lowerCamelCase_ =f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.'''
lowerCamelCase_ =client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], blocks=self.payload, text=lowerCAmelCase, )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =''''''
for key, value in failures.items():
lowerCamelCase_ =value[:200] + ''' [Truncated]''' if len(lowerCAmelCase ) > 250 else value
failures_text += f'''*{key}*\n_{value}_\n\n'''
lowerCamelCase_ =job_name
lowerCamelCase_ ={'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
lowerCamelCase_ ={
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def lowercase__ ( self ):
"""simple docstring"""
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
lowerCamelCase_ =self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
lowerCamelCase_ =sorted(self.doc_test_results.items(), key=lambda lowerCAmelCase : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
lowerCamelCase_ =f'''*Num failures* :{len(job_result['failed'] )} \n'''
lowerCamelCase_ =job_result['''failures''']
lowerCamelCase_ =self.get_reply_blocks(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, text=lowerCAmelCase )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], text=f'''Results for {job}''', blocks=lowerCAmelCase, thread_ts=self.thread_ts['''ts'''], )
time.sleep(1 )
def a_ ( ):
    """Fetch a mapping of job name -> job URL for the current GitHub Actions run.

    Uses the paginated "list jobs for a workflow run" REST endpoint (100 per
    page). Returns an empty dict on any error (best-effort: links are optional).
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
    result = requests.get(url ).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        # First page already fetched 100 jobs; compute how many more pages remain.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )

        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''' ).json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links." , e )

    return {}
def a_ ( __snake_case ):
    """Read every file in the artifact directory ``__snake_case`` into a dict.

    Keys are file names without their extension; values are the file contents
    (UTF-8 text). Returns an empty dict when the directory does not exist.
    Raises ValueError (chained) if a file is not valid UTF-8.
    """
    artifact_path = __snake_case
    _artifact = {}

    if os.path.exists(artifact_path ):
        files = os.listdir(artifact_path )
        for file in files:
            try:
                with open(os.path.join(artifact_path , file ) , encoding='''utf-8''' ) as f:
                    # Key by the file name without its extension.
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'''Could not open {os.path.join(artifact_path , file )}.''' ) from e

    return _artifact
def a_ ( ):
    """Scan the current working directory for artifact directories.

    Returns a dict mapping artifact name -> ``Artifact`` holding the registered
    paths (here, the directory itself).
    """

    class Artifact:
        """One artifact: its name plus the list of paths registered under it."""

        def __init__( self, name ):
            self.name = name
            self.paths = []

        def __str__( self ):
            return self.name

        def add_path( self, path ):
            # Each entry keeps the artifact name so callers can index by it later.
            self.paths.append({'''name''': self.name, '''path''': path} )

    _available_artifacts = {}

    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )

        _available_artifacts[artifact_name].add_path(directory )

    return _available_artifacts
if __name__ == "__main__":
    # NOTE(review): the call sites below use the original helper names
    # (get_job_links, retrieve_available_artifacts, retrieve_artifact,
    # handle_test_results, extract_first_line_failure, Message) while the
    # definitions above were mangled to `a_` / `__UpperCamelCase` — and most
    # assignment targets here were mangled to `a_` as well, so later reads
    # (docs, artifact, failed, success, ...) have no matching binding.
    # Verify against the upstream script before running.
    a_ : Optional[int] = get_job_links()
    a_ : List[Any] = retrieve_available_artifacts()

    # Maps a doc-file pattern to a human-readable category name.
    a_ : Union[str, Any] = collections.OrderedDict(
        [
            ("""*.py""", """API Examples"""),
            ("""*.md""", """MD Examples"""),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    a_ : List[str] = {
        v: {
            """failed""": [],
            """failures""": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    a_ : List[Any] = github_actions_job_links.get("""run_doctests""")

    a_ : Dict = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
    a_ : Union[str, Any] = retrieve_artifact(artifact_path["""name"""])
    if "stats" in artifact:
        # NOTE(review): `a_ , a_ , a_ : Any = ...` is a SyntaxError (annotated
        # assignment cannot have a tuple target) — originally a 3-way unpack.
        a_ , a_ , a_ : Any = handle_test_results(artifact["""stats"""])
        a_ : int = failed
        a_ : str = success
        a_ : Tuple = time_spent[1:-1] + """, """

        a_ : int = extract_first_line_failure(artifact["""failures_short"""])
        for line in artifact["summary_short"].split("""\n"""):
            if re.search("""FAILED""", line):
                a_ : Optional[int] = line.replace("""FAILED """, """""")
                a_ : Optional[int] = line.split()[0].replace("""\n""", """""")

                if "::" in line:
                    a_ , a_ : int = line.split("""::""")
                else:
                    a_ , a_ : str = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        a_ : List[Any] = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        a_ : Optional[int] = all_failures[test] if test in all_failures else """N/A"""
                        a_ : Any = failure
                        break

    # Post the main message, then one threaded reply per failing job.
    a_ : List[Any] = Message("""🤗 Results of the doc tests.""", doc_test_results)
    message.post()
    message.post_reply()
| 676 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    """Convert a TensorFlow BERT checkpoint to a PyTorch ``BertForPreTraining`` state dict.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: JSON file with the model architecture config.
        pytorch_dump_path: where to write the converted ``state_dict``.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    a_ : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--bert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    # NOTE(review): the parser was assigned to the mangled name `a_` but is read
    # back as `parser`/`args`, and `convert_tf_checkpoint_to_pytorch` is not
    # defined above (the function was mangled to `a_`) — verify upstream.
    a_ : Optional[Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 676 | 1 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
a_ : Tuple = logging.get_logger(__name__)
a_ : Optional[Any] = {"""vocab_file""": """spiece.model"""}
a_ : Optional[Any] = {
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class __UpperCamelCase ( lowerCamelCase__ ):
    """SentencePiece-based CPM tokenizer with jieba pre-segmentation.

    NOTE(review): identifiers in this file look machine-mangled — several
    ``def`` lines below repeat the parameter name ``lowerCAmelCase`` (a
    SyntaxError in Python), and method bodies reference the original,
    pre-mangling names (``mask_token``, ``sp_model_kwargs``, ...) that no
    longer exist. Verify against the upstream source before use.
    """

    def __init__( self, lowerCAmelCase, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<sep>", lowerCAmelCase="<pad>", lowerCAmelCase="<cls>", lowerCAmelCase="<mask>", lowerCAmelCase=["<eop>", "<eod>"], lowerCAmelCase = None, **lowerCAmelCase, ):
        """Wrap the mask token, initialise the base tokenizer, load SentencePiece and jieba."""
        lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else mask_token

        lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=lowerCAmelCase, remove_space=lowerCAmelCase, keep_accents=lowerCAmelCase, bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, sep_token=lowerCAmelCase, pad_token=lowerCAmelCase, cls_token=lowerCAmelCase, mask_token=lowerCAmelCase, additional_special_tokens=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, )

        # Segment id used for the classification token (XLNet-style).
        lowerCamelCase_ =3
        lowerCamelCase_ =do_lower_case
        lowerCamelCase_ =remove_space
        lowerCamelCase_ =keep_accents
        lowerCamelCase_ =vocab_file

        lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowerCAmelCase )

        # jieba is an optional dependency, only needed by the CPM tokenizers.
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                '''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
                '''See https://pypi.org/project/jieba/ for installation.''' )
        lowerCamelCase_ =jieba
        # Translate space/newline to placeholder glyphs before SentencePiece.
        lowerCamelCase_ =str.maketrans(''' \n''', '''\u2582\u2583''' )

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def lowercase__ ( self ):
        """Size of the SentencePiece vocabulary."""
        return len(self.sp_model )

    def lowercase__ ( self ):
        """Return the full vocab (SentencePiece pieces plus added tokens) as a dict."""
        lowerCamelCase_ ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        """Drop the (unpicklable) SentencePiece processor when pickling."""
        lowerCamelCase_ =self.__dict__.copy()
        lowerCamelCase_ =None
        return state

    def __setstate__( self, lowerCAmelCase ):
        """Restore state and reload the SentencePiece model from the vocab file."""
        lowerCamelCase_ =d

        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs''' ):
            lowerCamelCase_ ={}

        lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowercase__ ( self, lowerCAmelCase ):
        """Normalise raw text: whitespace, quote style, accents and casing per the flags."""
        if self.remove_space:
            lowerCamelCase_ =''' '''.join(inputs.strip().split() )
        else:
            lowerCamelCase_ =inputs
        lowerCamelCase_ =outputs.replace('''``''', '''"''' ).replace('''\'\'''', '''"''' )

        if not self.keep_accents:
            lowerCamelCase_ =unicodedata.normalize('''NFKD''', lowerCAmelCase )
            lowerCamelCase_ =''''''.join([c for c in outputs if not unicodedata.combining(lowerCAmelCase )] )
        if self.do_lower_case:
            lowerCamelCase_ =outputs.lower()

        return outputs

    def lowercase__ ( self, lowerCAmelCase ):
        """Tokenize text with SentencePiece, re-splitting digit-comma pieces (XLNet-style)."""
        lowerCamelCase_ =self.preprocess_text(lowerCAmelCase )
        lowerCamelCase_ =self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase )
        lowerCamelCase_ =[]
        for piece in pieces:
            if len(lowerCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                lowerCamelCase_ =self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCAmelCase, '''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        lowerCamelCase_ =cur_pieces[1:]
                    else:
                        lowerCamelCase_ =cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(lowerCAmelCase )
            else:
                new_pieces.append(lowerCAmelCase )

        return new_pieces

    def lowercase__ ( self, lowerCAmelCase ):
        """Convert a token (piece) to its SentencePiece id."""
        return self.sp_model.PieceToId(lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase ):
        """Convert a SentencePiece id back to its token (piece)."""
        return self.sp_model.IdToPiece(lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase ):
        """Join pieces into a single string, mapping the underline glyph back to spaces."""
        lowerCamelCase_ =''''''.join(lowerCAmelCase ).replace(lowerCAmelCase, ''' ''' ).strip()
        return out_string

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """Append special tokens: ``A sep cls`` or ``A sep B sep cls`` (XLNet layout)."""
        lowerCamelCase_ =[self.sep_token_id]
        lowerCamelCase_ =[self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = False ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCAmelCase, token_ids_a=lowerCAmelCase, already_has_special_tokens=lowerCAmelCase )

        if token_ids_a is not None:
            return ([0] * len(lowerCAmelCase )) + [1] + ([0] * len(lowerCAmelCase )) + [1, 1]
        return ([0] * len(lowerCAmelCase )) + [1, 1]

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """Return token type ids: 0 for sequence A, 1 for B, 2 for the cls segment."""
        lowerCamelCase_ =[self.sep_token_id]
        lowerCamelCase_ =[2]

        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """Save (copy or serialize) the SentencePiece model file into ``save_directory``."""
        if not os.path.isdir(lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowerCamelCase_ =os.path.join(
            lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowerCAmelCase, '''wb''' ) as fi:
                lowerCamelCase_ =self.sp_model.serialized_model_proto()
                fi.write(lowerCAmelCase )

        return (out_vocab_file,)

    def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Decode, then map the placeholder glyphs back to space and newline."""
        lowerCamelCase_ =super()._decode(*lowerCAmelCase, **lowerCAmelCase )
        lowerCamelCase_ =text.replace(''' ''', '''''' ).replace('''\u2582''', ''' ''' ).replace('''\u2583''', '''\n''' )
        return text
| 676 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : Optional[int] = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __UpperCamelCase ( lowerCamelCase__ ):
    """Configuration for the AltCLIP text model.

    Stores hyper-parameters (vocab/hidden sizes, layer counts, dropout, ...);
    ``project_dim`` is the output dimension of the text projection head.
    Parameter names reconstructed from the attribute reads in the original body.
    """

    lowercase : Optional[Any] ='altclip_text_model'

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.0_2,
        initializer_factor=0.0_2,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class __UpperCamelCase ( lowerCamelCase__ ):
    """Configuration for the AltCLIP vision tower (ViT-style hyper-parameters).

    NOTE(review): identifiers look machine-mangled — the repeated
    ``lowerCAmelCase`` parameters in ``__init__`` are a SyntaxError and the
    ``lowerCamelCase_ =`` assignments lost their ``self.<name>`` targets;
    verify against the upstream source.
    """

    lowercase : Dict ='altclip_vision_model'

    def __init__( self, lowerCAmelCase=768, lowerCAmelCase=3_072, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3, lowerCAmelCase=224, lowerCAmelCase=32, lowerCAmelCase="quick_gelu", lowerCAmelCase=1e-5, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1.0, **lowerCAmelCase, ):
        """Store vision-model hyper-parameters (hidden size, patch/image size, ...)."""
        super().__init__(**lowerCAmelCase )

        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =projection_dim
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =patch_size
        lowerCamelCase_ =image_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =initializer_factor
        lowerCamelCase_ =attention_dropout
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =hidden_act

    @classmethod
    def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ):
        """Load this sub-config from a pretrained name/path, unwrapping it from a
        full AltCLIP config dict when necessary."""
        cls._set_token_in_kwargs(lowerCAmelCase )

        lowerCamelCase_, lowerCamelCase_ =cls.get_config_dict(lowerCAmelCase, **lowerCAmelCase )

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('''model_type''' ) == "altclip":
            lowerCamelCase_ =config_dict['''vision_config''']

        if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(lowerCAmelCase, **lowerCAmelCase )
class __UpperCamelCase ( lowerCamelCase__ ):
    """Composite AltCLIP configuration holding a text and a vision sub-config.

    Supports the legacy ``text_config_dict`` / ``vision_config_dict`` kwargs:
    their values are merged into ``text_config`` / ``vision_config`` with a
    warning on conflicting keys.

    NOTE(review): the ``lowerCamelCase_ =`` assignments below lost their
    original targets (``text_config_dict``, ``_text_config_dict``, ...), so
    the merge logic cannot run as written — verify against upstream.
    """

    lowercase : Dict ='altclip'
    lowercase : str =True

    def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=768, lowerCAmelCase=2.6_5_9_2, **lowerCAmelCase ):
        """Build text/vision sub-configs, applying the legacy ``*_config_dict`` overrides."""
        # If `_config_dict` exist, we use them for the backward compatibility.
        lowerCamelCase_ =kwargs.pop('''text_config_dict''', lowerCAmelCase )
        lowerCamelCase_ =kwargs.pop('''vision_config_dict''', lowerCAmelCase )

        super().__init__(**lowerCAmelCase )

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                lowerCamelCase_ ={}

            # This is the complete result when using `text_config_dict`.
            lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase ).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        lowerCamelCase_ =(
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        lowerCamelCase_ =(
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(lowerCAmelCase )

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )

        if vision_config_dict is not None:
            if vision_config is None:
                lowerCamelCase_ ={}

            # This is the complete result when using `vision_config_dict`.
            lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                lowerCamelCase_ ={
                    str(lowerCAmelCase ): value for key, value in _vision_config_dict['''id2label'''].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        lowerCamelCase_ =(
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        lowerCamelCase_ =(
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(lowerCAmelCase )

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )

        if text_config is None:
            lowerCamelCase_ ={}
            logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )

        if vision_config is None:
            lowerCamelCase_ ={}
            logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' )

        lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase )
        lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase )

        lowerCamelCase_ =projection_dim
        lowerCamelCase_ =logit_scale_init_value
        lowerCamelCase_ =1.0

    @classmethod
    def lowercase__ ( cls, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ):
        """Build an AltCLIPConfig from already-constructed text and vision sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCAmelCase )

    def lowercase__ ( self ):
        """Serialize to a dict, expanding the nested sub-configs."""
        lowerCamelCase_ =copy.deepcopy(self.__dict__ )
        lowerCamelCase_ =self.text_config.to_dict()
        lowerCamelCase_ =self.vision_config.to_dict()
        lowerCamelCase_ =self.__class__.model_type
        return output
| 676 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
a_ : Optional[Any] = logging.get_logger(__name__)
def a_ ( __snake_case ):
    """Return the shape of a tf.Tensor or np.ndarray as a list of ints.

    For TF tensors, static dimensions are used where known and the dynamic
    ``tf.shape`` entry is substituted where the static dim is None.
    """
    tensor = __snake_case
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )

    dynamic = tf.shape(tensor )

    # Fully-unknown static shape: fall back to the dynamic shape tensor.
    if tensor.shape == tf.TensorShape(None ):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def a_ ( logits , axis = None , name = None ):
    """Stable softmax: adds a tiny constant to the logits so fully-masked rows
    (all large-negative) do not produce NaNs; forwards ``axis`` and ``name``
    to ``tf.nn.softmax``.
    """
    return tf.nn.softmax(logits=logits + 1e-9 , axis=axis , name=name )
def a_ ( inputs , weight , bias , epsilon=1e-5 , axis=-1 ):
    """Functional layer normalization over a single axis.

    This is a very simplified functional layernorm, designed to duplicate the
    functionality of PyTorch ``nn.functional.layer_norm`` when this is needed
    to port models in Transformers. Only 1-D ``weight``/``bias`` and a single
    integer ``axis`` are supported.
    """
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
        raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )

    # Get mean and variance on the axis to be normalized
    mean , variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight , shape )
        bias = tf.reshape(bias , shape )

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
    return outputs
def a_ ( input , start_dim=0 , end_dim=-1 ):
    """Replicates the behavior of torch.flatten in TF: merges the dimensions
    ``start_dim`` through ``end_dim`` (inclusive) into one.
    """
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(input , out_shape )
def a_ ( encoder_attention_mask ):
    """Expand a 2D/3D encoder attention mask to the 4D additive form used by
    attention layers: 0 where attended, ``dtype.min`` where masked.
    """
    if not isinstance(encoder_attention_mask , tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask )  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def a_ ( tensor , embed_dim , tensor_name = "input_ids" ):
    """Graph-compatible assertion that every id in ``tensor`` is < ``embed_dim``,
    raising with a tokenization-oriented error message otherwise.
    """
    tf.debugging.assert_less(
        tensor , tf.cast(embed_dim , dtype=tensor.dtype ) , message=(
            f'''The maximum value of {tensor_name} ({tf.math.reduce_max(tensor )}) must be smaller than the embedding '''
            f'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
        ) , )
def a_ ( group , name , data ):
    """Save string attributes to an HDF5-style ``group``, chunking the array when
    it would exceed the HDF5 object-header limit (Keras-compatible layout:
    ``name0``, ``name1``, ... for chunks, plain ``name`` otherwise).
    """
    HDF5_OBJECT_HEADER_LIMIT = 6_4512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            '''The following attributes cannot be saved to HDF5 file because '''
            f'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
            f'''bytes: {bad_attributes}''' )

    data_npy = np.asarray(data )

    num_chunks = 1
    chunked_data = np.array_split(data_npy , num_chunks )

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_npy , num_chunks )

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def a_ ( group , name ):
    """Load string attributes from an HDF5-style ``group``, reassembling chunked
    attributes (``name0``, ``name1``, ...) written by the matching save helper.
    Bytes entries are decoded as UTF-8.
    """
    if name in group.attrs:
        data = [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
            chunk_id += 1
    return data
def a_ ( data ):
    """Expand every rank-1 ``tf.Tensor`` leaf of the (possibly nested) structure
    ``data`` to rank 2 by adding a trailing axis; other leaves pass through.
    """

    def _expand_single_ad_tensor(t ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t

    return tf.nest.map_structure(_expand_single_ad_tensor , data )
| 676 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCamelCase__ ):
    def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=2, lowerCAmelCase=99, lowerCAmelCase=0, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase="last", lowerCAmelCase=None, lowerCAmelCase=None, ):
        """Store the Flaubert test fixture's hyper-parameters.

        NOTE(review): identifiers look machine-mangled — the repeated
        `lowerCAmelCase` parameter names are a SyntaxError, and the
        `lowerCamelCase_ =` assignments lost their `self.<name>` targets
        (original names visible on the right-hand sides). Verify upstream.
        """
        lowerCamelCase_ =parent
        lowerCamelCase_ =batch_size
        lowerCamelCase_ =seq_length
        lowerCamelCase_ =is_training
        lowerCamelCase_ =use_input_lengths
        lowerCamelCase_ =use_token_type_ids
        lowerCamelCase_ =use_labels
        lowerCamelCase_ =gelu_activation
        lowerCamelCase_ =sinusoidal_embeddings
        lowerCamelCase_ =causal
        lowerCamelCase_ =asm
        lowerCamelCase_ =n_langs
        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =n_special
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =hidden_dropout_prob
        lowerCamelCase_ =attention_probs_dropout_prob
        lowerCamelCase_ =max_position_embeddings
        lowerCamelCase_ =type_vocab_size
        lowerCamelCase_ =type_sequence_label_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =num_labels
        lowerCamelCase_ =num_choices
        lowerCamelCase_ =summary_type
        lowerCamelCase_ =use_proj
        lowerCamelCase_ =scope
    def lowercase__ ( self ):
        """Build random input tensors (ids, masks, optional lengths/labels) plus a config.

        Returns the 9-tuple consumed by the ``create_and_check_*`` methods.
        """
        lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )

        lowerCamelCase_ =None
        if self.use_input_lengths:
            lowerCamelCase_ =(
                ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length

        lowerCamelCase_ =None
        if self.use_token_type_ids:
            lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs )

        lowerCamelCase_ =None
        lowerCamelCase_ =None
        lowerCamelCase_ =None
        if self.use_labels:
            lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size )
            lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float()
            lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices )

        lowerCamelCase_ =self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def lowercase__ ( self ):
        """Build a FlaubertConfig from the fixture's stored hyper-parameters."""
        return FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Run the base FlaubertModel with several input combinations and check
        the output hidden-state shape.

        NOTE(review): duplicate `lowerCAmelCase` parameter names are a
        SyntaxError — identifiers look machine-mangled; verify upstream.
        """
        lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase )
        lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase )
        lowerCamelCase_ =model(lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Instantiate FlaubertWithLMHeadModel and check loss (scalar) and logits shapes.

        NOTE(review): mangled duplicate parameter names; body reads ``model``/``result``
        that are never bound locally — compare with upstream test suite.
        """
        lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
        self.parent.assertEqual(result.loss.shape, () )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Instantiate FlaubertForQuestionAnsweringSimple and check start/end logits shapes.

        NOTE(review): mangled duplicate parameter names; see class-level caveat about
        machine-obfuscated identifiers.
        """
        lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase )
        lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Instantiate FlaubertForQuestionAnswering (beam-search QA head) and check
        loss and the start/end top-k output shapes.

        NOTE(review): mangled duplicate parameter names; locals assigned to
        ``lowerCamelCase_`` but read back as ``result``/``result_with_labels`` —
        compare with upstream ``create_and_check_flaubert_qa``.
        """
        lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase )
        lowerCamelCase_ =model(
            lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, )
        lowerCamelCase_ =model(
            lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, )
        ((lowerCamelCase_), ) =result_with_labels.to_tuple()
        lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
        ((lowerCamelCase_), ) =result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, () )
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Instantiate FlaubertForSequenceClassification and check loss/logits shapes.

        NOTE(review): mangled duplicate parameter names — see class-level caveat.
        """
        lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase )
        lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase )
        self.parent.assertEqual(result.loss.shape, () )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Instantiate FlaubertForTokenClassification and check the per-token logits shape.

        NOTE(review): mangled duplicate parameter names — see class-level caveat.
        """
        lowerCamelCase_ =self.num_labels
        lowerCamelCase_ =FlaubertForTokenClassification(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Instantiate FlaubertForMultipleChoice: tile the inputs per choice and check
        the (batch, num_choices) logits shape.

        NOTE(review): mangled duplicate parameter names; ``input_ids``/``token_type_ids``/
        ``input_mask`` are read but never bound locally — see class-level caveat.
        """
        lowerCamelCase_ =self.num_choices
        lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        # expand each tensor from (batch, seq) to (batch, num_choices, seq)
        lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        lowerCamelCase_ =model(
            lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    def lowercase__ ( self ):
        """Unpack prepare_config_and_inputs into the (config, inputs_dict) pair used
        by the common model tests.

        NOTE(review): every unpack target is the same mangled name ``lowerCamelCase_``
        while the dict below reads ``input_ids``/``token_type_ids``/``input_lengths``/
        ``input_mask`` — machine-obfuscated; compare with the upstream
        ``prepare_config_and_inputs_for_common``.
        """
        lowerCamelCase_ =self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ),
        ) =config_and_inputs
        lowerCamelCase_ ={
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''lengths''': input_lengths,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Model-level test suite for the Flaubert family (common tests + pipeline mixin).

    NOTE(review): identifiers are machine-mangled (class name, base-class names,
    every ``lowercase__`` method); several method bodies read names that are never
    bound (``pipeline_test_casse_name``, ``model_class``, ``inputs_dict``) — the
    code will not run as written. Compare with the upstream FlaubertModelTest.
    """
    # Tuple of all model classes under test; empty when torch is unavailable.
    lowercase : List[Any] =(
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by the pipeline test mixin.
    lowercase : Tuple =(
        {
            'feature-extraction': FlaubertModel,
            'fill-mask': FlaubertWithLMHeadModel,
            'question-answering': FlaubertForQuestionAnsweringSimple,
            'text-classification': FlaubertForSequenceClassification,
            'token-classification': FlaubertForTokenClassification,
            'zero-shot': FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """Return True for pipeline-test combinations known to fail (QA with slow tokenizers)."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('''Fast''' )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ):
        """Extend the common input dict with zero-valued labels for the beam-search QA head."""
        lowerCamelCase_ =super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                lowerCamelCase_ =torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
                lowerCamelCase_ =torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
        return inputs_dict
    def lowercase__ ( self ):
        """setUp: create the model tester and the config tester."""
        lowerCamelCase_ =FlaubertModelTester(self )
        lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, emb_dim=37 )
    def lowercase__ ( self ):
        """Run the shared FlaubertConfig sanity checks."""
        self.config_tester.run_common_tests()
    def lowercase__ ( self ):
        """Check the base model's output shapes."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Check the LM-head model."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Check the simple (span) QA model."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Check the beam-search QA model."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Check the sequence-classification model."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Check the token-classification model."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Check the multiple-choice model."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase )
    @slow
    def lowercase__ ( self ):
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
    @slow
    @require_torch_gpu
    def lowercase__ ( self ):
        """Trace each model with torch.jit, round-trip it through disk, and re-run it."""
        lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            # NOTE(review): `return` exits the whole test at the first such class —
            # presumably `continue` was intended; confirm against upstream.
            if model_class == FlaubertForMultipleChoice:
                return
            lowerCamelCase_ =True
            lowerCamelCase_ =model_class(config=lowerCAmelCase )
            lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase )
            lowerCamelCase_ =torch.jit.trace(
                lowerCAmelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, '''traced_model.pt''' ) )
                lowerCamelCase_ =torch.jit.load(os.path.join(lowerCAmelCase, '''traced_model.pt''' ), map_location=lowerCAmelCase )
                loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase ), inputs_dict['''attention_mask'''].to(lowerCAmelCase ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    """Integration test: run pretrained flaubert_base_cased on a fixed sentence and
    compare a 3x3 slice of the output against golden values.

    NOTE(review): identifiers are machine-mangled; ``model``/``output`` are read but
    never bound locally — compare with the upstream FlaubertModelIntegrationTest.
    """
    @slow
    def lowercase__ ( self ):
        """Forward a fixed token sequence and check shape plus a logits slice (atol=1e-4)."""
        lowerCamelCase_ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
        lowerCamelCase_ =torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        with torch.no_grad():
            lowerCamelCase_ =model(lowerCAmelCase )[0]
        lowerCamelCase_ =torch.Size((1, 11, 768) )
        self.assertEqual(output.shape, lowerCAmelCase )
        # golden values recorded from a reference run of the checkpoint
        lowerCamelCase_ =torch.tensor(
            [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=1e-4 ) )
| 676 | 1 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
a_ : Dict = logging.get_logger(__name__)  # module-level logger (name mangled; conventionally `logger`)
class __UpperCamelCase :
    """Pairs a question-encoder tokenizer with a generator tokenizer and routes calls
    to whichever is currently active (RAG-style tokenizer wrapper).

    NOTE(review): identifiers are machine-mangled; several methods read names that
    are never bound locally (``save_directory``, ``config``, ``labels``, the
    ``max_length`` family), so the code will not run as written — compare with the
    upstream ``RagTokenizer``.
    """
    def __init__( self, lowerCAmelCase, lowerCAmelCase ):
        """Store both tokenizers; default the active one to the question encoder."""
        lowerCamelCase_ =question_encoder
        lowerCamelCase_ =generator
        lowerCamelCase_ =self.question_encoder
    def lowercase__ ( self, lowerCAmelCase ):
        """Save both tokenizers under dedicated subfolders of the given directory."""
        if os.path.isfile(lowerCAmelCase ):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(lowerCAmelCase, exist_ok=lowerCAmelCase )
        lowerCamelCase_ =os.path.join(lowerCAmelCase, '''question_encoder_tokenizer''' )
        lowerCamelCase_ =os.path.join(lowerCAmelCase, '''generator_tokenizer''' )
        self.question_encoder.save_pretrained(lowerCAmelCase )
        self.generator.save_pretrained(lowerCAmelCase )
    @classmethod
    def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ):
        """Alternate constructor: load both sub-tokenizers from a pretrained RAG repo."""
        # local import to avoid a circular dependency with the auto-tokenizer module
        from ..auto.tokenization_auto import AutoTokenizer
        lowerCamelCase_ =kwargs.pop('''config''', lowerCAmelCase )
        if config is None:
            lowerCamelCase_ =RagConfig.from_pretrained(lowerCAmelCase )
        lowerCamelCase_ =AutoTokenizer.from_pretrained(
            lowerCAmelCase, config=config.question_encoder, subfolder='''question_encoder_tokenizer''' )
        lowerCamelCase_ =AutoTokenizer.from_pretrained(
            lowerCAmelCase, config=config.generator, subfolder='''generator_tokenizer''' )
        return cls(question_encoder=lowerCAmelCase, generator=lowerCAmelCase )
    def __call__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Delegate tokenization to the currently active tokenizer."""
        return self.current_tokenizer(*lowerCAmelCase, **lowerCAmelCase )
    def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Delegate batch decoding to the generator tokenizer."""
        return self.generator.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
    def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Delegate single-sequence decoding to the generator tokenizer."""
        return self.generator.decode(*lowerCAmelCase, **lowerCAmelCase )
    def lowercase__ ( self ):
        """Switch the active tokenizer to the question encoder (input mode)."""
        lowerCamelCase_ =self.question_encoder
    def lowercase__ ( self ):
        """Switch the active tokenizer to the generator (target mode)."""
        lowerCamelCase_ =self.generator
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "longest", lowerCAmelCase = None, lowerCAmelCase = True, **lowerCAmelCase, ):
        """Deprecated seq2seq batch preparation: tokenize sources, then targets."""
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''', lowerCAmelCase, )
        if max_length is None:
            lowerCamelCase_ =self.current_tokenizer.model_max_length
        lowerCamelCase_ =self(
            lowerCAmelCase, add_special_tokens=lowerCAmelCase, return_tensors=lowerCAmelCase, max_length=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, **lowerCAmelCase, )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            lowerCamelCase_ =self.current_tokenizer.model_max_length
        lowerCamelCase_ =self(
            text_target=lowerCAmelCase, add_special_tokens=lowerCAmelCase, return_tensors=lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, truncation=lowerCAmelCase, **lowerCAmelCase, )
        lowerCamelCase_ =labels['''input_ids''']
        return model_inputs
| 676 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
a_ : List[Any] = logging.get_logger(__name__)  # module-level logger (name mangled; code below reads `logger`)
def a_ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : int=False ) -> List[str]:
    """Load a PyTorch checkpoint (single file or sharded) and convert it to a Flax state dict.

    NOTE(review): all four parameters share the mangled name ``__snake_case`` (a
    SyntaxError as written) and the body reads ``is_sharded``/``pt_path``/
    ``pt_state_dict``/``flax_state_dict`` which are never bound — machine-obfuscated;
    compare with upstream ``load_pytorch_checkpoint_in_flax_state_dict``.
    """
    try:
        import torch # noqa: F401
    except ImportError:
        logger.error(
            '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise
    if not is_sharded:
        lowerCamelCase_ =os.path.abspath(__snake_case )
        logger.info(F'''Loading PyTorch weights from {pt_path}''' )
        # map_location='cpu' so loading never requires a GPU
        lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' )
        logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
        lowerCamelCase_ =convert_pytorch_state_dict_to_flax(__snake_case , __snake_case )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        lowerCamelCase_ =convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case )
    return flax_state_dict
def a_ ( __snake_case : Tuple[str] , __snake_case : np.ndarray , __snake_case : Dict[str, jnp.ndarray] , __snake_case : str , ) -> (Tuple[str], np.ndarray):
    """Rename a PyTorch parameter key tuple to its Flax equivalent, reshaping the
    tensor where the layouts differ (conv kernels, linear weights).

    NOTE(review): four parameters share the mangled name ``__snake_case``
    (SyntaxError as written); the body reads ``pt_tuple_key``/``pt_tensor``/
    ``model_prefix``/``renamed_pt_tuple_key`` which are never bound — compare with
    upstream ``rename_key_and_reshape_tensor``.
    """
    def is_key_or_prefix_key_in_dict(__snake_case : Tuple[str] ) -> bool:
        # True when the candidate key (bare or prefixed with the model prefix)
        # exists in the randomly-initialized Flax state dict
        return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''scale''',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''mean''',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''var''',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''embedding''',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ):
        # PyTorch conv weight (out, in, kh, kw) -> Flax kernel (kh, kw, in, out)
        lowerCamelCase_ =pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ):
        # Flax stores linear kernels transposed relative to PyTorch
        lowerCamelCase_ =pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    lowerCamelCase_ =None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        lowerCamelCase_ =pt_tuple_key[-2] + '''_g'''
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        lowerCamelCase_ =pt_tuple_key[-2] + '''_v'''
    if name is not None:
        lowerCamelCase_ =pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def a_ ( __snake_case : Union[str, Any] , __snake_case : str ) -> str:
    """Convert a (single-file) PyTorch state dict into a nested Flax params dict.

    NOTE(review): both parameters share the mangled name ``__snake_case``
    (SyntaxError as written); the body reads ``pt_state_dict``/``flax_model``/
    ``random_flax_state_dict``/``flax_state_dict`` which are never bound —
    compare with upstream ``convert_pytorch_state_dict_to_flax``.
    """
    # convert pytorch tensor to numpy
    lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
    lowerCamelCase_ =flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        lowerCamelCase_ =flax_model.params['''params''']
    else:
        lowerCamelCase_ =flax_model.params
    lowerCamelCase_ =flatten_dict(__snake_case )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        lowerCamelCase_ =flatten_dict(flax_model.params['''batch_stats'''] )
        random_flax_state_dict.update(__snake_case )
    lowerCamelCase_ ={}
    # head checkpoint being loaded into a base model, or vice versa, needs
    # the base-model prefix stripped/added on each key
    lowerCamelCase_ =(model_prefix not in flax_model_params) and (
        model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
    )
    lowerCamelCase_ =(model_prefix in flax_model_params) and (
        model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        lowerCamelCase_ =tuple(pt_key.split('''.''' ) )
        # remove base model prefix if necessary
        lowerCamelCase_ =pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            lowerCamelCase_ =pt_tuple_key[1:]
        # Correctly rename weight parameters
        lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
            __snake_case , __snake_case , __snake_case , __snake_case )
        # add model prefix if necessary
        lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            lowerCamelCase_ =(model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                lowerCamelCase_ =jnp.asarray(__snake_case )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(__snake_case , __snake_case )
                continue
            # also add unexpected weight so that warning is thrown
            lowerCamelCase_ =jnp.asarray(__snake_case )
        else:
            # also add unexpected weight so that warning is thrown
            lowerCamelCase_ =jnp.asarray(__snake_case )
    return unflatten_dict(__snake_case )
def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[Any]:
    """Convert a sharded PyTorch checkpoint (list of shard files) into a nested Flax
    params dict, processing one shard at a time.

    NOTE(review): mangled duplicate parameter names; the body reads
    ``shard_filenames``/``flax_model``/``pt_state_dict`` which are never bound —
    compare with upstream ``convert_pytorch_sharded_state_dict_to_flax``.
    """
    import torch
    # Load the index
    lowerCamelCase_ ={}
    for shard_file in shard_filenames:
        # load using msgpack utils
        lowerCamelCase_ =torch.load(__snake_case )
        lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
        lowerCamelCase_ =flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            lowerCamelCase_ =flax_model.params['''params''']
            lowerCamelCase_ =flatten_dict(__snake_case )
            random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
        else:
            lowerCamelCase_ =flax_model.params
            lowerCamelCase_ =flatten_dict(__snake_case )
        lowerCamelCase_ =(model_prefix not in flax_model_params) and (
            model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
        )
        lowerCamelCase_ =(model_prefix in flax_model_params) and (
            model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            lowerCamelCase_ =tuple(pt_key.split('''.''' ) )
            # remove base model prefix if necessary
            lowerCamelCase_ =pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                lowerCamelCase_ =pt_tuple_key[1:]
            # Correctly rename weight parameters
            lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
                __snake_case , __snake_case , __snake_case , __snake_case )
            # add model prefix if necessary
            lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                lowerCamelCase_ =(model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                        F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    lowerCamelCase_ =jnp.asarray(__snake_case )
                    continue
                if "var" in flax_key[-1]:
                    lowerCamelCase_ =jnp.asarray(__snake_case )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(__snake_case , __snake_case )
                    continue
                # also add unexpected weight so that warning is thrown
                lowerCamelCase_ =jnp.asarray(__snake_case )
            else:
                # also add unexpected weight so that warning is thrown
                lowerCamelCase_ =jnp.asarray(__snake_case )
    return unflatten_dict(__snake_case )
def a_ ( __snake_case : List[str] , __snake_case : Dict ) -> str:
    """Deserialize a Flax checkpoint from disk and copy its weights into a PyTorch model.

    NOTE(review): mangled duplicate parameter names; the body reads
    ``flax_checkpoint_path``/``model`` which are never bound — compare with upstream
    ``load_flax_checkpoint_in_pytorch_model``.
    """
    lowerCamelCase_ =os.path.abspath(__snake_case )
    logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )
    # import correct flax class
    lowerCamelCase_ =getattr(__snake_case , '''Flax''' + model.__class__.__name__ )
    # load flax weight dict
    with open(__snake_case , '''rb''' ) as state_f:
        try:
            lowerCamelCase_ =from_bytes(__snake_case , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
    return load_flax_weights_in_pytorch_model(__snake_case , __snake_case )
def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]:
    """Copy weights from a Flax state tree into a PyTorch model, renaming keys to the
    PyTorch layout, then log missing/unexpected keys.

    NOTE(review): mangled duplicate parameter names; the body reads ``flax_state``/
    ``pt_model``/``flax_state_dict``/``missing_keys`` etc. which are never bound —
    compare with upstream ``load_flax_weights_in_pytorch_model``.
    """
    try:
        import torch # noqa: F401
    except ImportError:
        logger.error(
            '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise
    # check if we have bf16 weights
    lowerCamelCase_ =flatten_dict(jax.tree_util.tree_map(lambda __snake_case : x.dtype == jnp.bfloataa , __snake_case ) ).values()
    if any(__snake_case ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''' )
        lowerCamelCase_ =jax.tree_util.tree_map(
            lambda __snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case )
    lowerCamelCase_ =flatten_dict(__snake_case )
    lowerCamelCase_ =pt_model.state_dict()
    # head-into-base / base-into-head loading needs the base-model prefix adjusted
    lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
    )
    lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    lowerCamelCase_ =[]
    lowerCamelCase_ =set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix
        lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            lowerCamelCase_ =flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            lowerCamelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict:
            # conv layer: Flax kernel (kh, kw, in, out) -> PyTorch weight (out, in, kh, kw)
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
            lowerCamelCase_ =jnp.transpose(__snake_case , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict:
            # linear layer: Flax stores the kernel transposed relative to PyTorch
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
            lowerCamelCase_ =flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_mean''',)
        elif "var" in flax_key_tuple[-1]:
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_var''',)
        if "batch_stats" in flax_state:
            lowerCamelCase_ ='''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
        else:
            lowerCamelCase_ ='''.'''.join(__snake_case )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        lowerCamelCase_ ={}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            lowerCamelCase_ =key.split('''.''' )
            lowerCamelCase_ =None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                lowerCamelCase_ =key_components[-2] + '''_g'''
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                lowerCamelCase_ =key_components[-2] + '''_v'''
            if name is not None:
                lowerCamelCase_ =key_components[:-3] + [name]
                lowerCamelCase_ ='''.'''.join(__snake_case )
                lowerCamelCase_ =key
        if flax_key in special_pt_names:
            lowerCamelCase_ =special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
                    F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
            else:
                # add weight to pytorch dict
                lowerCamelCase_ =np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor
                lowerCamelCase_ =torch.from_numpy(__snake_case )
                # remove from missing keys
                missing_keys.remove(__snake_case )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(__snake_case )
    pt_model.load_state_dict(__snake_case )
    # re-transform missing_keys to list
    lowerCamelCase_ =list(__snake_case )
    if len(__snake_case ) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
            F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).''' )
    else:
        logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
    if len(__snake_case ) > 0:
        logger.warning(
            F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
            F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
            ''' use it for predictions and inference.''' )
    else:
        logger.warning(
            F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
            '''If your task is similar to the task the model of the checkpoint was trained on, '''
            F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
    return pt_model
| 676 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import plumbing for the ResNet subpackage: build an import-structure map,
# adding each backend's symbols only when that backend is installed, then either
# import eagerly (type checking) or install a _LazyModule.
# NOTE(review): every assignment targets the mangled name `a_` while the final
# _LazyModule call reads `_import_structure` — machine-obfuscated; compare with
# the upstream transformers/models/resnet/__init__.py.
a_ : List[Any] = {
    """configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch backend symbols
    a_ : List[str] = [
        """RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ResNetForImageClassification""",
        """ResNetModel""",
        """ResNetPreTrainedModel""",
        """ResNetBackbone""",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow backend symbols
    a_ : List[str] = [
        """TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFResNetForImageClassification""",
        """TFResNetModel""",
        """TFResNetPreTrainedModel""",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax backend symbols
    a_ : List[str] = [
        """FlaxResNetForImageClassification""",
        """FlaxResNetModel""",
        """FlaxResNetPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # eager imports so static analyzers see the real symbols
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
    import sys
    # replace this module with a lazy proxy that imports submodules on first access
    a_ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 676 |
'''simple docstring'''
def a_ ( first_str : str , second_str : str ) -> str:
    """Interleave the characters of two strings.

    Characters are taken alternately from ``first_str`` and ``second_str``; once
    the shorter string is exhausted, the remainder of the longer one is appended
    unchanged (e.g. ``("AB", "XYZ") -> "AXBYZ"``).

    Args:
        first_str: string whose characters come first at each position.
        second_str: string whose characters come second at each position.

    Returns:
        The interleaved string.
    """
    # Bug fix: the original signature declared *both* parameters as
    # ``__snake_case`` (a SyntaxError: duplicate argument) while the body read
    # ``first_str``/``second_str``; the intended names are restored here.
    from itertools import zip_longest

    # zip_longest pads the shorter string with "" so the tail of the longer
    # string is carried through without an explicit length comparison.
    return "".join(
        a + b for a, b in zip_longest(first_str , second_str , fillvalue="" )
    )
if __name__ == "__main__":
    # NOTE(review): `alternative_string_arrange` is not defined in this file (the
    # function above is mangled to `a_`) — this demo line would raise NameError.
    print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
| 676 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solve X_L = 2*pi*f*L for whichever quantity is given as 0.

    Exactly one of the three arguments must be 0; that quantity is computed
    from the other two and returned in a single-entry dict keyed by its name.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    # NOTE: the original definition repeated the same parameter name three
    # times (a SyntaxError); the body's names are restored as the parameters.
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if inductance < 0:
        raise ValueError('''Inductance cannot be negative''')
    if frequency < 0:
        raise ValueError('''Frequency cannot be negative''')
    if reactance < 0:
        raise ValueError('''Inductive reactance cannot be negative''')
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        # Unreachable given the count() guard above; kept as a defensive branch.
        raise ValueError('''Exactly one argument must be 0''')


# Backward-compatible alias for the previous (mangled) public name.
a_ = ind_reactance

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Map of submodule -> public names, consumed by _LazyModule below.
# (Was assigned to a throwaway name, so the final _LazyModule call raised NameError.)
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The model class is only importable when torch is installed.
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; at runtime the lazy module below is used.
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow BERT checkpoint into a PyTorch ``BertForPreTraining`` dump.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        bert_config_file: JSON config file describing the model architecture.
        pytorch_dump_path: Destination file for the converted PyTorch weights.
    """
    # NOTE: the original definition repeated the same parameter name three
    # times (a SyntaxError); parameter and local names are restored to match
    # the __main__ block below.
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)


# Backward-compatible alias for the previous (mangled) public name.
a_ = convert_tf_checkpoint_to_pytorch

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 676 |
'''simple docstring'''
import functools
def a_(worda: str, wordb: str) -> int:
    """Return the Levenshtein edit distance between ``worda`` and ``wordb``.

    Uses top-down recursion over the two string indices, memoized with
    ``functools.cache``; insert, delete and substitute each cost 1.

    >>> a_("kitten", "sitting")
    3
    """
    # NOTE: the original outer and inner definitions each repeated the same
    # parameter name (a SyntaxError); names are restored to match the bodies.
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all remaining of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all remaining of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),        # delete from worda
            1 + min_distance(indexa, indexb + 1),        # insert into worda
            diff + min_distance(indexa + 1, indexb + 1), # substitute (or keep when equal)
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 | 1 |
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
    # NOTE(review): identifiers here look machine-mangled — the class name, the
    # `lowerCamelCase__` base (presumably TokenizerTesterMixin), and the repeated
    # `lowercase` / `lowercase__` names. Every `def lowercase__` overwrites the
    # previous one at class-creation time, so unittest will only see the last one
    # and none are discoverable as `test_*` methods — confirm against upstream.
    lowercase : Optional[Any] =BertJapaneseTokenizer  # presumably `tokenizer_class` (read as self.tokenizer_class below)
    lowercase : Dict =False
    lowercase : int =True
    def lowercase__ ( self ):
        """Write a tiny WordPiece vocabulary file into the temp dir (setUp-style)."""
        super().setUp()
        # NOTE(review): both assignments below target a mangled name; the code that
        # follows expects them to be `vocab_tokens` and `self.vocab_file`.
        lowerCamelCase_ =[
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''こんにちは''',
            '''こん''',
            '''にちは''',
            '''ばんは''',
            '''##こん''',
            '''##にちは''',
            '''##ばんは''',
            '''世界''',
            '''##世界''',
            '''、''',
            '''##、''',
            '''。''',
            '''##。''',
        ]
        lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def lowercase__ ( self, lowerCAmelCase ):
        """Return a (raw input, expected detokenized output) text pair."""
        lowerCamelCase_ ='''こんにちは、世界。 \nこんばんは、世界。'''
        lowerCamelCase_ ='''こんにちは 、 世界 。 こんばんは 、 世界 。'''
        return input_text, output_text
    def lowercase__ ( self, lowerCAmelCase ):
        """Encode the sample text and decode it back with the given tokenizer."""
        lowerCamelCase_, lowerCamelCase_ =self.get_input_output_texts(lowerCAmelCase )
        lowerCamelCase_ =tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =tokenizer.decode(lowerCAmelCase, clean_up_tokenization_spaces=lowerCAmelCase )
        return text, ids
    def lowercase__ ( self ):
        """Deliberately not implemented for this tokenizer."""
        pass  # TODO add if relevant
    def lowercase__ ( self ):
        """Deliberately not implemented for this tokenizer."""
        pass  # TODO add if relevant
    def lowercase__ ( self ):
        """Deliberately not implemented for this tokenizer."""
        pass  # TODO add if relevant
    def lowercase__ ( self ):
        """Full tokenizer: WordPiece tokenization and token->id conversion."""
        lowerCamelCase_ =self.tokenizer_class(self.vocab_file )
        lowerCamelCase_ =tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
        self.assertListEqual(lowerCAmelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
    def lowercase__ ( self ):
        """MeCab word tokenizer: tokenization, ids, and a pickle round-trip."""
        lowerCamelCase_ =self.tokenizer_class(self.vocab_file, word_tokenizer_type='''mecab''' )
        self.assertIsNotNone(lowerCAmelCase )
        lowerCamelCase_ ='''こんにちは、世界。\nこんばんは、世界。'''
        lowerCamelCase_ =tokenizer.tokenize(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        # The tokenizer must survive pickling (e.g. for multiprocessing data loaders).
        lowerCamelCase_ =os.path.join(self.tmpdirname, '''tokenizer.bin''' )
        with open(lowerCAmelCase, '''wb''' ) as handle:
            pickle.dump(lowerCAmelCase, lowerCAmelCase )
        with open(lowerCAmelCase, '''rb''' ) as handle:
            lowerCamelCase_ =pickle.load(lowerCAmelCase )
        lowerCamelCase_ =tokenizer_new.tokenize(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
    def lowercase__ ( self ):
        """MecabTokenizer with the ipadic dictionary."""
        lowerCamelCase_ =MecabTokenizer(mecab_dic='''ipadic''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
    def lowercase__ ( self ):
        """MecabTokenizer with unidic_lite; skipped when the package is absent."""
        try:
            lowerCamelCase_ =MecabTokenizer(mecab_dic='''unidic_lite''' )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
    def lowercase__ ( self ):
        """MecabTokenizer with unidic; skipped when the package is absent."""
        try:
            lowerCamelCase_ =MecabTokenizer(mecab_dic='''unidic''' )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
    def lowercase__ ( self ):
        """MecabTokenizer lower-cases Latin text when do_lower_case is set."""
        lowerCamelCase_ =MecabTokenizer(do_lower_case=lowerCAmelCase, mecab_dic='''ipadic''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
    def lowercase__ ( self ):
        """MecabTokenizer with an explicit jumandic option; skipped if the dict is missing."""
        try:
            lowerCamelCase_ =MecabTokenizer(
                do_lower_case=lowerCAmelCase, normalize_text=lowerCAmelCase, mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
    def lowercase__ ( self ):
        """MecabTokenizer keeps ideographic spaces when normalize_text is disabled."""
        lowerCamelCase_ =MecabTokenizer(normalize_text=lowerCAmelCase, mecab_dic='''ipadic''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''], )
    @require_sudachi
    def lowercase__ ( self ):
        """Sudachi word tokenizer: tokenization, ids, and a pickle round-trip."""
        lowerCamelCase_ =self.tokenizer_class(self.vocab_file, word_tokenizer_type='''sudachi''' )
        self.assertIsNotNone(lowerCAmelCase )
        lowerCamelCase_ ='''こんにちは、世界。\nこんばんは、世界。'''
        lowerCamelCase_ =tokenizer.tokenize(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        lowerCamelCase_ =os.path.join(self.tmpdirname, '''tokenizer.bin''' )
        with open(lowerCAmelCase, '''wb''' ) as handle:
            pickle.dump(lowerCAmelCase, lowerCAmelCase )
        with open(lowerCAmelCase, '''rb''' ) as handle:
            lowerCamelCase_ =pickle.load(lowerCAmelCase )
        lowerCamelCase_ =tokenizer_new.tokenize(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
    @require_sudachi
    def lowercase__ ( self ):
        """SudachiTokenizer with the core dictionary preserves whitespace tokens."""
        lowerCamelCase_ =SudachiTokenizer(sudachi_dict_type='''core''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''], )
    @require_sudachi
    def lowercase__ ( self ):
        """Sudachi split mode A yields the shortest units."""
        lowerCamelCase_ =SudachiTokenizer(sudachi_dict_type='''core''', sudachi_split_mode='''A''' )
        self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ), ['''外国''', '''人''', '''参政''', '''権'''] )
    @require_sudachi
    def lowercase__ ( self ):
        """Sudachi split mode B yields intermediate units."""
        lowerCamelCase_ =SudachiTokenizer(sudachi_dict_type='''core''', sudachi_split_mode='''B''' )
        self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ), ['''外国人''', '''参政権'''] )
    @require_sudachi
    def lowercase__ ( self ):
        """Sudachi split mode C keeps the whole compound as one unit."""
        lowerCamelCase_ =SudachiTokenizer(sudachi_dict_type='''core''', sudachi_split_mode='''C''' )
        self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ), ['''外国人参政権'''] )
    @require_sudachi
    def lowercase__ ( self ):
        """SudachiTokenizer lower-cases Latin text when do_lower_case is set."""
        lowerCamelCase_ =SudachiTokenizer(do_lower_case=lowerCAmelCase, sudachi_dict_type='''core''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''], )
    @require_sudachi
    def lowercase__ ( self ):
        """SudachiTokenizer keeps ideographic spaces when normalize_text is disabled."""
        lowerCamelCase_ =SudachiTokenizer(normalize_text=lowerCAmelCase, sudachi_dict_type='''core''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''], )
    @require_sudachi
    def lowercase__ ( self ):
        """SudachiTokenizer drops whitespace tokens when trim_whitespace is set."""
        lowerCamelCase_ =SudachiTokenizer(trim_whitespace=lowerCAmelCase, sudachi_dict_type='''core''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
    @require_jumanpp
    def lowercase__ ( self ):
        """Juman++ word tokenizer: tokenization, ids, and a pickle round-trip."""
        lowerCamelCase_ =self.tokenizer_class(self.vocab_file, word_tokenizer_type='''jumanpp''' )
        self.assertIsNotNone(lowerCAmelCase )
        lowerCamelCase_ ='''こんにちは、世界。\nこんばんは、世界。'''
        lowerCamelCase_ =tokenizer.tokenize(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        lowerCamelCase_ =os.path.join(self.tmpdirname, '''tokenizer.bin''' )
        with open(lowerCAmelCase, '''wb''' ) as handle:
            pickle.dump(lowerCAmelCase, lowerCAmelCase )
        with open(lowerCAmelCase, '''rb''' ) as handle:
            lowerCamelCase_ =pickle.load(lowerCAmelCase )
        lowerCamelCase_ =tokenizer_new.tokenize(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
    @require_jumanpp
    def lowercase__ ( self ):
        """JumanppTokenizer default behaviour keeps ideographic-space tokens."""
        lowerCamelCase_ =JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
    @require_jumanpp
    def lowercase__ ( self ):
        """JumanppTokenizer lower-cases Latin text when do_lower_case is set."""
        lowerCamelCase_ =JumanppTokenizer(do_lower_case=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
    @require_jumanpp
    def lowercase__ ( self ):
        """JumanppTokenizer splits half-width katakana when normalize_text is disabled."""
        lowerCamelCase_ =JumanppTokenizer(normalize_text=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
    @require_jumanpp
    def lowercase__ ( self ):
        """JumanppTokenizer drops whitespace tokens when trim_whitespace is set."""
        lowerCamelCase_ =JumanppTokenizer(trim_whitespace=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''], )
    @require_jumanpp
    def lowercase__ ( self ):
        """JumanppTokenizer keeps an emoticon containing spaces as one token."""
        lowerCamelCase_ =JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ), ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''], )
    def lowercase__ ( self ):
        """WordpieceTokenizer in isolation: greedy longest-match with [UNK] fallback."""
        lowerCamelCase_ =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
        lowerCamelCase_ ={}
        for i, token in enumerate(lowerCAmelCase ):
            lowerCamelCase_ =i
        lowerCamelCase_ =WordpieceTokenizer(vocab=lowerCAmelCase, unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ), [] )
        self.assertListEqual(tokenizer.tokenize('''こんにちは''' ), ['''こんにちは'''] )
        self.assertListEqual(tokenizer.tokenize('''こんばんは''' ), ['''こん''', '''##ばんは'''] )
        self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ), ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
    def lowercase__ ( self ):
        """The sentencepiece-backed subword tokenizer of the nlp-waseda checkpoint."""
        lowerCamelCase_ =BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
        lowerCamelCase_ =tokenizer.subword_tokenizer
        lowerCamelCase_ =subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
        self.assertListEqual(lowerCAmelCase, ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
        lowerCamelCase_ =subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
        self.assertListEqual(lowerCAmelCase, ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
    def lowercase__ ( self ):
        """build_inputs_with_special_tokens adds [CLS]/[SEP] around one or two sequences."""
        lowerCamelCase_ =self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
        lowerCamelCase_ =tokenizer.encode('''ありがとう。''', add_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =tokenizer.encode('''どういたしまして。''', add_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
        lowerCamelCase_ =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase, lowerCAmelCase )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
    # NOTE(review): same machine-mangled naming as the class above; `lowercase` is
    # presumably `tokenizer_class` and each `def lowercase__` shadows the previous.
    lowercase : int =BertJapaneseTokenizer
    lowercase : int =False
    def lowercase__ ( self ):
        """Write a character-level vocabulary file into the temp dir (setUp-style)."""
        super().setUp()
        # NOTE(review): the two assignments below should bind `vocab_tokens` and
        # `self.vocab_file`, which the following lines read.
        lowerCamelCase_ =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def lowercase__ ( self, **lowerCAmelCase ):
        """Build a character-subword tokenizer from the temp-dir vocabulary."""
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type='''character''', **lowerCAmelCase )
    def lowercase__ ( self, lowerCAmelCase ):
        """Return a (raw input, expected detokenized output) text pair."""
        lowerCamelCase_ ='''こんにちは、世界。 \nこんばんは、世界。'''
        lowerCamelCase_ ='''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
        return input_text, output_text
    def lowercase__ ( self ):
        """Deliberately not implemented for this tokenizer."""
        pass  # TODO add if relevant
    def lowercase__ ( self ):
        """Deliberately not implemented for this tokenizer."""
        pass  # TODO add if relevant
    def lowercase__ ( self ):
        """Deliberately not implemented for this tokenizer."""
        pass  # TODO add if relevant
    def lowercase__ ( self ):
        """Full tokenizer: character tokenization and token->id conversion."""
        lowerCamelCase_ =self.tokenizer_class(self.vocab_file, subword_tokenizer_type='''character''' )
        lowerCamelCase_ =tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
        self.assertListEqual(
            lowerCAmelCase, ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
    def lowercase__ ( self ):
        """CharacterTokenizer in isolation: per-character split with [UNK] fallback."""
        lowerCamelCase_ =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        lowerCamelCase_ ={}
        for i, token in enumerate(lowerCAmelCase ):
            lowerCamelCase_ =i
        lowerCamelCase_ =CharacterTokenizer(vocab=lowerCAmelCase, unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ), [] )
        self.assertListEqual(tokenizer.tokenize('''こんにちは''' ), ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
        self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ), ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
    def lowercase__ ( self ):
        """build_inputs_with_special_tokens adds [CLS]/[SEP] around one or two sequences."""
        lowerCamelCase_ =self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
        lowerCamelCase_ =tokenizer.encode('''ありがとう。''', add_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =tokenizer.encode('''どういたしまして。''', add_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
        lowerCamelCase_ =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase, lowerCAmelCase )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
    def lowercase__ ( self ):
        """AutoTokenizer resolves the cl-tohoku checkpoint to the expected class.

        NOTE(review): both isinstance arguments are the same mangled name here —
        the second was presumably BertJapaneseTokenizer; confirm against upstream.
        """
        lowerCamelCase_ ='''cl-tohoku/bert-base-japanese'''
        lowerCamelCase_ =AutoTokenizer.from_pretrained(lowerCAmelCase )
        self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
class __UpperCamelCase ( unittest.TestCase ):
    def lowercase__ ( self ):
        """Loading a checkpoint with a mismatched tokenizer class logs a warning.

        Checks both directions: BertTokenizer on a Japanese checkpoint and
        BertJapaneseTokenizer on a plain BERT checkpoint.
        """
        lowerCamelCase_ ='''cl-tohoku/bert-base-japanese'''
        with self.assertLogs('''transformers''', level='''WARNING''' ) as cm:
            BertTokenizer.from_pretrained(lowerCAmelCase )
        self.assertTrue(
            cm.records[0].message.startswith(
                '''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
                ''' is called from.''' ) )
        lowerCamelCase_ ='''bert-base-cased'''
        with self.assertLogs('''transformers''', level='''WARNING''' ) as cm:
            BertJapaneseTokenizer.from_pretrained(lowerCAmelCase )
        self.assertTrue(
            cm.records[0].message.startswith(
                '''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
                ''' is called from.''' ) )
| 676 |
'''simple docstring'''
def a_(number: int) -> bool:
    """Return True if ``number`` is automorphic (its square ends in itself).

    A non-negative integer n is automorphic when n*n has n as its trailing
    digits (e.g. 5 -> 25, 76 -> 5776). Negative numbers return False.

    Raises:
        TypeError: if ``number`` is not an integer.
    """
    # Bug fix: the original called isinstance(number, number), passing the
    # value itself as the type — a TypeError on every invocation.
    if not isinstance(number, int):
        lowerCamelCase_ = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(lowerCamelCase_)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the square with the digits of the number.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 | 1 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; EsmConfig.__init__ below calls logger.info/logger.warning,
# so this must be bound to the name `logger` (it was assigned to a throwaway name).
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class __UpperCamelCase ( lowerCamelCase__ ):
    # ESM model configuration (sequence models and the ESMFold folding variant).
    # NOTE(review): names look machine-mangled — the base is presumably
    # PretrainedConfig and `lowercase` is the `model_type` class attribute.
    lowercase : List[str] ='esm'
    def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=768, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3_072, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=1_026, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-12, lowerCAmelCase="absolute", lowerCAmelCase=True, lowerCAmelCase=None, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase, ):
        """Store transformer hyper-parameters and, for folding models, an EsmFoldConfig.

        When ``is_folding_model`` is true an ``esmfold_config`` (dict or
        EsmFoldConfig) and a ``vocab_list`` are resolved, falling back to
        defaults with an info/warning log; otherwise both are set to None.
        """
        super().__init__(pad_token_id=lowerCAmelCase, mask_token_id=lowerCAmelCase, **lowerCAmelCase )
        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =hidden_dropout_prob
        lowerCamelCase_ =attention_probs_dropout_prob
        lowerCamelCase_ =max_position_embeddings
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =position_embedding_type
        lowerCamelCase_ =use_cache
        lowerCamelCase_ =emb_layer_norm_before
        lowerCamelCase_ =token_dropout
        lowerCamelCase_ =is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''' )
                lowerCamelCase_ =EsmFoldConfig()
            elif isinstance(lowerCAmelCase, lowerCAmelCase ):
                # A plain dict is promoted to an EsmFoldConfig instance.
                lowerCamelCase_ =EsmFoldConfig(**lowerCAmelCase )
            lowerCamelCase_ =esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
                lowerCamelCase_ =get_default_vocab_list()
            else:
                lowerCamelCase_ =vocab_list
        else:
            lowerCamelCase_ =None
            lowerCamelCase_ =None
        # use_esm_attn_map is unsupported in the HF port; fail fast if requested.
        if self.esmfold_config is not None and getattr(self.esmfold_config, '''use_esm_attn_map''', lowerCAmelCase ):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
    def lowercase__ ( self ):
        """Serialize to a dict, expanding a nested EsmFoldConfig if present."""
        lowerCamelCase_ =super().to_dict()
        if isinstance(self.esmfold_config, lowerCAmelCase ):
            lowerCamelCase_ =self.esmfold_config.to_dict()
        return output
@dataclass
class __UpperCamelCase :
    # Configuration of the ESMFold head; `trunk` nests a TrunkConfig.
    # NOTE(review): field names appear machine-mangled (`lowercase` repeated);
    # the declared types/defaults are preserved from the original.
    lowercase : str =None
    lowercase : bool =True
    lowercase : bool =False
    lowercase : bool =False
    lowercase : bool =False
    lowercase : float =0
    lowercase : bool =True
    lowercase : bool =False
    lowercase : int =1_28
    lowercase : "TrunkConfig" =None
    def lowercase__ ( self ):
        """Post-init style hook: promote a missing/dict `trunk` to a TrunkConfig."""
        if self.trunk is None:
            lowerCamelCase_ =TrunkConfig()
        elif isinstance(self.trunk, lowerCAmelCase ):
            lowerCamelCase_ =TrunkConfig(**self.trunk )
    def lowercase__ ( self ):
        """Serialize to a dict, expanding the nested TrunkConfig."""
        lowerCamelCase_ =asdict(self )
        lowerCamelCase_ =self.trunk.to_dict()
        return output
@dataclass
class __UpperCamelCase :
    # Configuration of the folding trunk (sequence/pairwise attention stack).
    # NOTE(review): field names appear machine-mangled (`lowercase` repeated).
    lowercase : int =48
    lowercase : int =10_24
    lowercase : int =1_28
    lowercase : int =32
    lowercase : int =32
    lowercase : int =32
    lowercase : float =0
    lowercase : float =0
    lowercase : bool =False
    lowercase : int =4
    lowercase : Optional[int] =1_28
    lowercase : "StructureModuleConfig" =None
    def lowercase__ ( self ):
        """Post-init style hook: resolve the nested structure module and validate dims."""
        if self.structure_module is None:
            lowerCamelCase_ =StructureModuleConfig()
        elif isinstance(self.structure_module, lowerCAmelCase ):
            lowerCamelCase_ =StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        # NOTE(review): x % x is always 0, so this check can never fire; the
        # divisor was presumably meant to be `self.sequence_head_width`.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'''
                f''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
        # NOTE(review): same tautology here; divisor presumably `self.pairwise_head_width`.
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'''
                f''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
        # The effective checks: state dim must equal num_heads * head_width exactly.
        lowerCamelCase_ =self.sequence_state_dim // self.sequence_head_width
        lowerCamelCase_ =self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
                f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
                f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
        if self.dropout >= 0.4:
            raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
    def lowercase__ ( self ):
        """Serialize to a dict, expanding the nested StructureModuleConfig."""
        lowerCamelCase_ =asdict(self )
        lowerCamelCase_ =self.structure_module.to_dict()
        return output
@dataclass
class __UpperCamelCase :
    # Configuration of the invariant-point-attention structure module.
    # NOTE(review): field names appear machine-mangled (`lowercase` repeated);
    # types/defaults are preserved from the original.
    lowercase : int =3_84
    lowercase : int =1_28
    lowercase : int =16
    lowercase : int =1_28
    lowercase : int =12
    lowercase : int =4
    lowercase : int =8
    lowercase : float =0.1
    lowercase : int =8
    lowercase : int =1
    lowercase : int =2
    lowercase : int =7
    lowercase : int =10
    lowercase : float =1E-8
    lowercase : float =1E5
    def lowercase__ ( self ):
        """Serialize this (flat) config to a plain dict."""
        return asdict(self )
def get_default_vocab_list() -> tuple:
    """Return the default ESM-2 token vocabulary (specials + amino-acid codes).

    EsmConfig.__init__ above calls this by name when a folding model is built
    without an explicit ``vocab_list``; the original definition was renamed to
    a throwaway identifier (and its ``List`` annotation was undefined at def
    time), so that call raised NameError.
    """
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )


# Backward-compatible alias for the previous (mangled) public name.
a_ = get_default_vocab_list
| 676 |
'''simple docstring'''
from __future__ import annotations
# Type alias for a 9x9 board of ints (0 means an empty cell).
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# NOTE: all names in this module had been collapsed to a single mangled
# identifier (shadowing each other) while call sites used the real names;
# the real names are restored throughout.


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` may be placed at (row, column).

    Checks the cell's row, column, and 3x3 box for an existing ``n``.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            # (row - row % 3, column - column % 3) is the box's top-left corner.
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of the first empty cell (0), or None if full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking.

    Returns the solved grid (the same object), or None if no solution exists.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo the guess and try the next digit
    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid row by row, cells separated by spaces."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 676 | 1 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the de-duplication registry so deprecation warnings fire again.

    NOTE: the original fixture's parameter was mangled away while the body
    used ``monkeypatch`` — pytest injects fixtures by parameter name, so the
    name is restored; both fixtures also shared one mangled name, so the
    second silently shadowed the first.
    """
    monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''', set())


@pytest.fixture
def mock_hfh(monkeypatch):
    """Patch the Hub client with a stub that lists a fixed set of metrics."""

    class MetricMock:
        def __init__(self, metric_id):
            # Only the id is needed by the code under test.
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr('''datasets.inspect.huggingface_hub''', HfhMock())


@pytest.mark.parametrize(
    '''func, args''', [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Each metric entry point must emit the FutureWarning pointing to evaluate."""
    if "tmp_path" in args:
        # Substitute the placeholder with the real per-test temporary directory.
        args = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match='''https://huggingface.co/docs/evaluate'''):
        func(*args)
| 676 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
a_ : Union[str, Any] = logging.get_logger(__name__)

# Maps a model identifier on the Hugging Face Hub to the URL of its config.json.
a_ : Tuple = {
    """huggingface/informer-tourism-monthly""": (
        """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class __UpperCamelCase(lowerCamelCase__):
    """Configuration for the Informer time-series forecasting model.

    Stores both the feature-pipeline hyper-parameters (lags, static/dynamic
    features, cardinalities) and the encoder/decoder transformer architecture.

    Bug fixes: every `__init__` parameter was named `lowerCAmelCase` (duplicate
    argument names are a SyntaxError) while the body read the real names; the
    class attributes were both bound to `lowercase` (second clobbered the
    first) instead of `model_type` / `attribute_map`, which the
    PretrainedConfig machinery reads; and the property had lost the
    `_number_of_features` name the body references.
    """

    model_type = 'informer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(
        self,
        prediction_length = None,
        context_length = None,
        distribution_output = "student_t",
        loss = "nll",
        input_size = 1,
        lags_sequence = None,
        scaling = "mean",
        num_dynamic_real_features = 0,
        num_static_categorical_features = 0,
        num_static_real_features = 0,
        num_time_features = 0,
        cardinality = None,
        embedding_dimension = None,
        d_model = 64,
        encoder_ffn_dim = 32,
        decoder_ffn_dim = 32,
        encoder_attention_heads = 2,
        decoder_attention_heads = 2,
        encoder_layers = 2,
        decoder_layers = 2,
        is_encoder_decoder = True,
        activation_function = "gelu",
        dropout = 0.05,
        encoder_layerdrop = 0.1,
        decoder_layerdrop = 0.1,
        attention_dropout = 0.1,
        activation_dropout = 0.1,
        num_parallel_samples = 100,
        init_std = 0.02,
        use_cache=True,
        attention_type = "prob",
        sampling_factor = 5,
        distil = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # fall back to the prediction length when no context length is given
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2 ) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )

    @property
    def _number_of_features(self) -> int:
        """Total width of the per-timestep feature vector fed to the transformer."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 676 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# Module-level logger for this feature extractor.
a_ : Union[str, Any] = logging.get_logger(__name__)
class __UpperCamelCase(lowerCamelCase__):
    """Speech feature extractor producing Kaldi-compatible log-mel filter-bank
    features with optional utterance-level cepstral mean/variance normalization.

    Bug fixes: every method signature used duplicate `lowerCAmelCase` parameters
    (a SyntaxError); all methods were named `lowercase__`, clobbering each other,
    while the bodies call `self._extract_fbank_features`, `self.utterance_cmvn`
    and `self.normalize`; the class attribute must be `model_input_names` (read
    by SequenceFeatureExtractor); and assignment targets such as
    `x[input_length:] = padding_value` had been collapsed to bare names.
    """

    model_input_names = ['input_features', 'attention_mask']

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs )
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform):
        """Compute Kaldi fbank features for one float waveform in [-1, 1]."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform ).unsqueeze(0 )
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate )
        return features.numpy()

    @staticmethod
    def utterance_cmvn(x, input_length, normalize_means=True, normalize_vars=True, padding_value=0.0):
        """Apply cepstral mean/variance normalization over the first `input_length` frames."""
        if normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x, mean )
        if normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x, std )

        if input_length < x.shape[0]:
            # zero (padding_value) out the padded frames
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32 )
        return x

    def normalize(self, input_features, attention_mask=None):
        """Apply per-utterance CMVN to each feature matrix in the batch."""
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value )
            for x, n in zip(input_features, lengths )
        ]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ):
        """Featurize raw speech into padded (and optionally normalized) input features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )

        is_batched_numpy = isinstance(raw_speech, np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray ):
            raw_speech = np.asarray(raw_speech, dtype=np.float32 )
        elif isinstance(raw_speech, np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform ) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({'''input_features''': features} )

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )

        # make sure list is in array format
        input_features = padded_inputs.get('''input_features''' )
        if isinstance(input_features[0], list ):
            padded_inputs['''input_features'''] = [np.asarray(feature, dtype=np.float32 ) for feature in input_features]

        attention_mask = padded_inputs.get('''attention_mask''' )
        if attention_mask is not None:
            padded_inputs['''attention_mask'''] = [np.asarray(array, dtype=np.int32 ) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32 )
                if self._get_padding_strategies(padding, max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['''input_features'''] = self.normalize(
                padded_inputs['''input_features'''], attention_mask=attention_mask )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )

        return padded_inputs
| 676 |
'''simple docstring'''
from __future__ import annotations
def a_(limit: int) -> list[int]:
    """Return all primes strictly below `limit` via an odd-only sieve of Eratosthenes.

    Bug fix: all sieve writes (`is_prime[0] = False`, `is_prime[index] = False`,
    `index = ...`, `primes = [2]`) had been collapsed to bare assignments to a
    single name, leaving `is_prime`/`index`/`primes` undefined.

    NOTE(review): like the upstream version, this assumes limit >= 3.
    """
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1 ), 2 ):
        # mark every multiple of i (starting at 2*i) as composite
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2 ):
        if is_prime[i]:
            primes.append(i )

    return primes
def _prime_sieve(limit: int) -> list[int]:
    """Return all primes strictly below `limit` (odd-only sieve of Eratosthenes)."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1 ), 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes


def a_(ceiling: int = 100_0000) -> int:
    """Project Euler 50: the prime below `ceiling` that is the sum of the most
    consecutive primes.

    Bug fixes: all assignment targets (`primes`, `length`, `largest`, `sol`) had
    been collapsed to one bare name, and `prime_sieve` was not defined under
    that name in this file — a private `_prime_sieve` helper makes the function
    self-contained.
    """
    primes = _prime_sieve(ceiling )
    length = 0
    largest = 0
    prime_set = set(primes )  # O(1) membership instead of scanning the list
    for i in range(len(primes ) ):
        # start at i + length: shorter runs can never improve the answer
        for j in range(i + length, len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
    # Bug fix: `solution` is not defined in this file — the solver above is
    # bound to `a_`, so call it by that name.
    print(F"""{a_() = }""")
| 676 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps submodule name -> public names it provides.
# Bug fix: the dict and the torch-only extension were bound to `a_` while the
# `_LazyModule(...)` call below reads `_import_structure`, and the lazy module
# must be installed into `sys.modules[__name__]` rather than a throwaway name.
_import_structure = {
    """configuration_informer""": [
        """INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """InformerConfig""",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # modeling code is only importable when torch is installed
    _import_structure["""modeling_informer"""] = [
        """INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """InformerForPrediction""",
        """InformerModel""",
        """InformerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 676 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase(lowerCamelCase__):
    """Unconditional image-generation pipeline using DDIM sampling.

    Bug fixes: `__init__` and `__call__` used duplicate `lowerCAmelCase`
    parameters (a SyntaxError), and all locals (`image_shape`, `image`,
    `model_output`, ...) had been collapsed to one bare name.
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )

        self.register_modules(unet=unet, scheduler=scheduler )

    @torch.no_grad()
    def __call__(
        self,
        batch_size = 1,
        generator = None,
        eta = 0.0,
        num_inference_steps = 50,
        use_clipped_model_output = None,
        output_type = "pil",
        return_dict = True,
    ):
        """Run the DDIM denoising loop and return generated images."""
        if isinstance(self.unet.config.sample_size, int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image, t ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1 )
        image = image.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 676 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
# Module-level logger for this image processor.
a_ : List[str] = logging.get_logger(__name__)
class __UpperCamelCase(lowerCamelCase__):
    """Image processor that resizes images down to multiples of `size_divisor` and
    rescales pixel values to [0, 1].

    Bug fixes: duplicate `lowerCAmelCase` parameters (a SyntaxError), collapsed
    local assignments, and the clobbered `model_input_names` / method names.
    """

    model_input_names = ['pixel_values']

    def __init__(self, do_resize = True, size_divisor = 32, resample=PILImageResampling.BILINEAR, do_rescale = True, **kwargs, ):
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )

    def resize(self, image, size_divisor, resample, data_format = None, **kwargs):
        """Resize so both sides are the largest multiples of `size_divisor` not
        exceeding the originals (calls the module-level `resize` helper)."""
        height, width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs )
        return image

    def rescale(self, image, scale, data_format = None, **kwargs):
        """Multiply pixel values by `scale` (delegates to the module-level helper)."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs )

    def preprocess(
        self,
        images,
        do_resize = None,
        do_rescale = None,
        size_divisor = None,
        resample=None,
        return_tensors = None,
        data_format = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Prepare a batch of images: optional resize, optional rescale, channel layout."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )

        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError('''Invalid image(s)''' )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample ) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255 ) for image in images]

        images = [to_channel_dimension_format(image, data_format ) for image in images]

        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors )
| 676 |
'''simple docstring'''
from maths.prime_check import is_prime
def a_(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin-prime pair, else -1.

    Bug fix: the check was `isinstance(__snake_case, __snake_case)` — both
    arguments were the same name, which raises TypeError for every input; the
    body clearly expects `isinstance(number, int)`.

    Raises:
        TypeError: if `number` is not an integer.
    """
    if not isinstance(number, int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 676 | 1 |
'''simple docstring'''
# Dependency version table: maps each dependency name to its pip requirement
# specifier (auto-generated style; keep entries alphabetized within reason).
a_ : List[str] = {
    """Pillow""": """Pillow<10.0.0""",
    """accelerate""": """accelerate>=0.20.3""",
    """av""": """av==9.2.0""",
    """beautifulsoup4""": """beautifulsoup4""",
    """black""": """black~=23.1""",
    """codecarbon""": """codecarbon==1.2.0""",
    """cookiecutter""": """cookiecutter==1.7.3""",
    """dataclasses""": """dataclasses""",
    """datasets""": """datasets!=2.5.0""",
    """decord""": """decord==0.6.0""",
    """deepspeed""": """deepspeed>=0.9.3""",
    """diffusers""": """diffusers""",
    """dill""": """dill<0.3.5""",
    """evaluate""": """evaluate>=0.2.0""",
    """fairscale""": """fairscale>0.3""",
    """faiss-cpu""": """faiss-cpu""",
    """fastapi""": """fastapi""",
    """filelock""": """filelock""",
    """flax""": """flax>=0.4.1,<=0.7.0""",
    """ftfy""": """ftfy""",
    """fugashi""": """fugashi>=1.0""",
    """GitPython""": """GitPython<3.1.19""",
    """hf-doc-builder""": """hf-doc-builder>=0.3.0""",
    """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
    """importlib_metadata""": """importlib_metadata""",
    """ipadic""": """ipadic>=1.0.0,<2.0""",
    """isort""": """isort>=5.5.4""",
    """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
    """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
    """jieba""": """jieba""",
    """kenlm""": """kenlm""",
    """keras-nlp""": """keras-nlp>=0.3.1""",
    """librosa""": """librosa""",
    """nltk""": """nltk""",
    """natten""": """natten>=0.14.6""",
    """numpy""": """numpy>=1.17""",
    """onnxconverter-common""": """onnxconverter-common""",
    """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
    """onnxruntime""": """onnxruntime>=1.4.0""",
    """opencv-python""": """opencv-python""",
    """optuna""": """optuna""",
    """optax""": """optax>=0.0.8,<=0.1.4""",
    """packaging""": """packaging>=20.0""",
    """parameterized""": """parameterized""",
    """phonemizer""": """phonemizer""",
    """protobuf""": """protobuf""",
    """psutil""": """psutil""",
    """pyyaml""": """pyyaml>=5.1""",
    """pydantic""": """pydantic<2""",
    """pytest""": """pytest>=7.2.0""",
    """pytest-timeout""": """pytest-timeout""",
    """pytest-xdist""": """pytest-xdist""",
    """python""": """python>=3.8.0""",
    """ray[tune]""": """ray[tune]""",
    """regex""": """regex!=2019.12.17""",
    """requests""": """requests""",
    """rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
    """rjieba""": """rjieba""",
    """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
    """ruff""": """ruff>=0.0.241,<=0.0.259""",
    """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
    """sacremoses""": """sacremoses""",
    """safetensors""": """safetensors>=0.3.1""",
    """sagemaker""": """sagemaker>=2.31.0""",
    """scikit-learn""": """scikit-learn""",
    """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
    """sigopt""": """sigopt""",
    """starlette""": """starlette""",
    """sudachipy""": """sudachipy>=0.6.6""",
    """sudachidict_core""": """sudachidict_core>=20220729""",
    """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
    """tensorflow""": """tensorflow>=2.6,<2.14""",
    """tensorflow-text""": """tensorflow-text<2.14""",
    """tf2onnx""": """tf2onnx""",
    """timeout-decorator""": """timeout-decorator""",
    """timm""": """timm""",
    """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
    """torch""": """torch>=1.9,!=1.12.0""",
    """torchaudio""": """torchaudio""",
    """torchvision""": """torchvision""",
    """pyctcdecode""": """pyctcdecode>=0.4.0""",
    """tqdm""": """tqdm>=4.27""",
    """unidic""": """unidic>=1.0.2""",
    """unidic_lite""": """unidic_lite>=1.0.7""",
    """urllib3""": """urllib3<2.0.0""",
    """uvicorn""": """uvicorn""",
}
| 676 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(lowerCamelCase__):
    """Output of the SDE-VE scheduler's step functions.

    Bug fixes: both fields were annotated under the same name (`lowercase`), so
    the second clobbered the first; the step code below constructs
    `SdeVeOutput(prev_sample=..., prev_sample_mean=...)`, and the original class
    name was immediately shadowed by the scheduler class, so this dataclass is
    renamed to the name actually used.
    """

    # denoised sample for the previous timestep
    prev_sample: torch.FloatTensor
    # mean of `prev_sample` before the diffusion noise is added
    prev_sample_mean: torch.FloatTensor
class __UpperCamelCase(lowerCamelCase__, lowerCamelCase__):
    """Variance-exploding (VE) SDE scheduler (Song et al.) with a predictor step
    (`step_pred`) and a Langevin corrector step (`step_correct`).

    Bug fixes: `__init__` used duplicate `lowerCAmelCase` parameters (a
    SyntaxError); every method was named `lowercase__`, clobbering each other,
    while the bodies call `self.set_sigmas`, `self.set_timesteps` and
    `self.get_adjacent_sigma`; and `self.*` assignment targets had been
    collapsed to a bare local name.
    """

    # number of diffusion steps a single scheduler step consumes
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps = 2_000,
        snr = 0.15,
        sigma_min = 0.01,
        sigma_max = 1348.0,
        sampling_eps = 1e-5,
        correct_steps = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps )

    def scale_model_input(self, sample, timestep = None):
        """The SDE-VE scheduler does not scale model inputs; returns `sample` unchanged."""
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps = None, device = None):
        """Set the continuous timesteps used for the diffusion chain."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device )

    def set_sigmas(self, num_inference_steps, sigma_min = None, sigma_max = None, sampling_eps = None):
        """Set the noise scales used for the diffusion chain (sigmas and discrete_sigmas)."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps )

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min ), math.log(sigma_max ), num_inference_steps ) )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def get_adjacent_sigma(self, timesteps, t):
        """Sigma of the previous discrete timestep (zero at timestep 0)."""
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )

    def step_pred(self, model_output, timestep, sample, generator = None, return_dict = True, ):
        """Predictor step: propagate the sample one reverse-SDE step."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps ) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device )

        sigma = self.discrete_sigmas[timesteps].to(sample.device )
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep ).to(sample.device )
        drift = torch.zeros_like(sample )
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            diffusion = diffusion.unsqueeze(-1 )
        drift = drift - diffusion**2 * model_output

        #  equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean )

    def step_correct(self, model_output, sample, generator = None, return_dict = True, ):
        """Corrector step: Langevin-dynamics correction based on the model output."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator ).to(sample.device )

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            step_size = step_size.unsqueeze(-1 )
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample )

    def add_noise(self, original_samples, noise, timesteps, ):
        """Add sigma-scaled noise to `original_samples` at the given `timesteps`."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device )
        sigmas = self.discrete_sigmas.to(original_samples.device )[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples ) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 676 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCamelCase(lowerCamelCase__):
    """Processor wrapping a CLIP image processor and an XLM-Roberta tokenizer.

    Bug fixes: duplicate `lowerCAmelCase` parameters (a SyntaxError); the three
    class attributes were all bound to `lowercase` (ProcessorMixin reads
    `attributes`, `image_processor_class` and `tokenizer_class`); the deprecation
    warning category was lost; and the `pixel_values` assignment target was
    collapsed to a bare name.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        super().__init__(image_processor, tokenizer )

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or featurize `images`; merge into one encoding."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs )

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs )

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ), tensor_type=return_tensors )

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs )

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs )

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 676 |
'''simple docstring'''
def a_(number: int, iterations: int) -> str:
    """Play FizzBuzz from `number` up to and including `iterations`.

    Bug fix: both parameters were named `__snake_case` — duplicate argument
    names are a SyntaxError — while the body reads `number` and `iterations`.

    Raises:
        ValueError: if `iterations` is not an int, `number` is not a positive
            int, or `iterations` < 1.
    """
    if not isinstance(iterations, int ):
        raise ValueError('''iterations must be defined as integers''' )
    if not isinstance(number, int ) or not number >= 1:
        raise ValueError(
            '''starting number must be\nand integer and be more than 0''' )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )

    out = ''''''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 676 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for the IF inpainting pipeline.

    Bug fixes: the base classes were obfuscated to an undefined name — the
    mixins imported above (`PipelineTesterMixin`, `IFPipelineTesterMixin`) are
    the intended bases; the class attributes read by the tester mixin
    (`pipeline_class`, `params`, `batch_params`, `required_optional_params`)
    and the `test_*` method names (required for unittest discovery) had all
    been clobbered to `lowercase` / `lowercase__`.
    """

    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic dummy inputs (image, mask, generator) for `device`."""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )

        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''' )
    def test_save_load_float16(self):
        # Requires safetensors/accelerate on CUDA; loosen tolerance for fp16.
        super().test_save_load_float16(expected_max_diff=1e-1 )

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
| 676 |
'''simple docstring'''
from typing import List
import numpy as np
def a_ ( __snake_case : dict ) -> int:
    """Return the number of shards implied by the list-valued entries of a ``gen_kwargs`` dict.

    Only ``list`` values count as parallel data sources; they must all share one length,
    which is the shard count. A dict with no lists counts as a single shard.

    Args:
        __snake_case: the ``gen_kwargs`` dict to inspect.

    Returns:
        The common list length, or 1 when there is no list.

    Raises:
        RuntimeError: if several lists have different lengths (sharding would be ambiguous).
    """
    gen_kwargs = __snake_case
    # Map each list-valued key to its length; non-list values (tuples, scalars) are ignored.
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                '''Sharding is ambiguous for this dataset: '''
                + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
                + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
                + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
                + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
            ) )
    max_length = max(lists_lengths.values(), default=0 )
    # Always report at least one shard, even when gen_kwargs holds no list at all.
    return max(1, max_length )
def a_ ( num_shards : int , max_num_jobs : int ) -> List[range]:
    """Split ``num_shards`` shard indices into at most ``max_num_jobs`` contiguous ranges.

    The first ``num_shards % max_num_jobs`` groups receive one extra shard so the split
    is balanced; empty trailing groups are dropped.

    Note: the original declared both parameters with one name (a SyntaxError); they are
    named here after the keyword call site ``_distribute_shards(num_shards=..., max_num_jobs=...)``.
    """
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        # Balanced split: base share plus one extra for the first (num_shards % max_num_jobs) groups.
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        # Each range starts where the previous one stopped.
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shards_indices_per_group.append(range(start, start + num_shards_to_add ) )
    return shards_indices_per_group
def a_ ( gen_kwargs : dict , max_num_jobs : int ) -> List[dict]:
    """Split one ``gen_kwargs`` dict into up to ``max_num_jobs`` per-job dicts.

    Each list value is sharded according to ``_distribute_shards``; non-list values are
    copied unchanged into every job dict.

    Note: the original declared both parameters with one name (a SyntaxError); names are
    restored from the identifiers the body reads.
    """
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        # Nothing to split — return a single shallow copy.
        return [dict(gen_kwargs )]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group ) )
        ]
def a_ ( gen_kwargs_list : List[dict] ) -> dict:
    """Merge per-shard ``gen_kwargs`` dicts back into a single dict.

    List values are concatenated across shards in order; non-list values are taken from
    the first dict (they are expected to be identical across shards).

    Note: the original's parameter was named ``__snake_case`` while the body read
    ``gen_kwargs_list``, and the ``isinstance`` check compared against the parameter;
    both are restored here.
    """
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def a_ ( rng : np.random.Generator , gen_kwargs : dict ) -> dict:
    """Return a copy of ``gen_kwargs`` whose list values are shuffled with ``rng``.

    One permutation is drawn per distinct list length, so lists of equal length are
    reordered identically and parallel data sources stay aligned.

    Note: the original assigned every local to ``lowerCamelCase_`` while reading
    ``indices_per_size`` / ``shuffled_kwargs``; names are restored here.
    """
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value, list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs
| 676 | 1 |
'''simple docstring'''
# Energy conversion table: factor to multiply by to express one unit of the key in joules.
# NOTE(review): the converter below reads this table as ``ENERGY_CONVERSION`` while the
# dict is bound to ``a_`` — the name appears mangled by obfuscation; verify.
a_ : dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 10_00,
    "megajoule": 1_00_00_00,
    "gigajoule": 10_00_00_00_00,
    "wattsecond": 1.0,
    "watthour": 36_00,
    "kilowatthour": 3_60_00_00,
    "newtonmeter": 1.0,
    "calorie_nutr": 41_86.8,
    "kilocalorie_nutr": 4_18_68_00.00,
    "electronvolt": 1.6_0217_6634e-19,
    "britishthermalunit_it": 10_55.0_55_85,
    "footpound": 1.35_58_18,
}
def a_ ( from_type : str , to_type : str , value : float ) -> float:
    """Convert ``value`` from unit ``from_type`` to unit ``to_type`` via the module-level
    ``ENERGY_CONVERSION`` table (factors to joules).

    Raises:
        ValueError: if either unit name is not a key of ``ENERGY_CONVERSION``.

    Note: the original declared all three parameters with one name (a SyntaxError); they
    are named here after the identifiers the body reads. ``ENERGY_CONVERSION`` is expected
    to be defined at module level (the table above is currently bound to ``a_``).
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
            F'''Valid values are: {', '.join(ENERGY_CONVERSION )}'''
        )
        raise ValueError(msg )
    # Convert to joules, then to the target unit.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 676 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
a_ : int = logging.getLogger(__name__)
def a_ ( ) -> Union[str, Any]:
    """CLI entry point: read a text dump line by line, wrap each line in the tokenizer's
    BOS/EOS (or CLS/SEP) tokens, encode to ids, and pickle the arrays to a dump file.

    NOTE(review): every local is assigned to ``lowerCamelCase_`` while later lines read
    ``parser``, ``args``, ``tokenizer``, ``bos``, ``sep``, ``data``, ``rslt``, ``iter``,
    ``interval``, ``start``, ``end``, ``dp_file``, ``vocab_size`` and ``rslt_`` — and
    ``type=__snake_case`` below references an undefined name — the identifiers appear
    mangled by obfuscation; restore them before running.
    """
    lowerCamelCase_ =argparse.ArgumentParser(
        description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
    parser.add_argument('''--file_path''' , type=__snake_case , default='''data/dump.txt''' , help='''The path to the data.''' )
    parser.add_argument('''--tokenizer_type''' , type=__snake_case , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
    parser.add_argument('''--tokenizer_name''' , type=__snake_case , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
    parser.add_argument('''--dump_file''' , type=__snake_case , default='''data/dump''' , help='''The dump file prefix.''' )
    lowerCamelCase_ =parser.parse_args()
    logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' )
    if args.tokenizer_type == "bert":
        lowerCamelCase_ =BertTokenizer.from_pretrained(args.tokenizer_name )
        lowerCamelCase_ =tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
        lowerCamelCase_ =tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
    elif args.tokenizer_type == "roberta":
        lowerCamelCase_ =RobertaTokenizer.from_pretrained(args.tokenizer_name )
        lowerCamelCase_ =tokenizer.special_tokens_map['''cls_token'''] # `<s>`
        lowerCamelCase_ =tokenizer.special_tokens_map['''sep_token'''] # `</s>`
    elif args.tokenizer_type == "gpt2":
        lowerCamelCase_ =GPTaTokenizer.from_pretrained(args.tokenizer_name )
        lowerCamelCase_ =tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
        lowerCamelCase_ =tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
    logger.info(F'''Loading text from {args.file_path}''' )
    with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
        lowerCamelCase_ =fp.readlines()
    logger.info('''Start encoding''' )
    logger.info(F'''{len(__snake_case )} examples to process.''' )
    lowerCamelCase_ =[]
    lowerCamelCase_ =0
    # Progress is logged every `interval` examples.
    lowerCamelCase_ =1_0000
    lowerCamelCase_ =time.time()
    for text in data:
        lowerCamelCase_ =F'''{bos} {text.strip()} {sep}'''
        lowerCamelCase_ =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
        rslt.append(__snake_case )
        iter += 1
        if iter % interval == 0:
            lowerCamelCase_ =time.time()
            logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
            lowerCamelCase_ =time.time()
    logger.info('''Finished binarization''' )
    logger.info(F'''{len(__snake_case )} examples processed.''' )
    lowerCamelCase_ =F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
    lowerCamelCase_ =tokenizer.vocab_size
    # Use 16-bit ids when the vocab fits, halving the pickle size.
    if vocab_size < (1 << 16):
        lowerCamelCase_ =[np.uintaa(__snake_case ) for d in rslt]
    else:
        lowerCamelCase_ =[np.intaa(__snake_case ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F'''Dump to {dp_file}''' )
    with open(__snake_case , '''wb''' ) as handle:
        pickle.dump(rslt_ , __snake_case , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this file (the entry point above is named `a_`); verify.
    main()
| 676 | 1 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    """Learnable mean/std pair used to normalize and de-normalize embedding vectors.

    NOTE(review): `__init__` assigns its parameters to `lowerCamelCase_` while the other
    methods read `self.mean` / `self.std` — attribute names look mangled by obfuscation.
    """
    @register_to_config
    def __init__( self, lowerCAmelCase = 768, ):
        """Create a zero-initialized mean and one-initialized std of width ``lowerCAmelCase``."""
        super().__init__()
        lowerCamelCase_ =nn.Parameter(torch.zeros(1, lowerCAmelCase ) )
        lowerCamelCase_ =nn.Parameter(torch.ones(1, lowerCAmelCase ) )
    def lowercase__ ( self, lowerCAmelCase = None, lowerCAmelCase = None, ):
        """Move the mean/std parameters to the given device/dtype and return self (fluent)."""
        lowerCamelCase_ =nn.Parameter(self.mean.to(lowerCAmelCase ).to(lowerCAmelCase ) )
        lowerCamelCase_ =nn.Parameter(self.std.to(lowerCAmelCase ).to(lowerCAmelCase ) )
        return self
    def lowercase__ ( self, lowerCAmelCase ):
        """Standardize embeddings: (embeds - mean) / std."""
        lowerCamelCase_ =(embeds - self.mean) * 1.0 / self.std
        return embeds
    def lowercase__ ( self, lowerCAmelCase ):
        """Invert the standardization: embeds * std + mean."""
        lowerCamelCase_ =(embeds * self.std) + self.mean
        return embeds
| 676 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
a_ : int = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class __UpperCamelCase ( lowerCamelCase__ ):
    """Configuration class for the MVP encoder-decoder model.

    NOTE(review): `__init__` declares every parameter as `lowerCAmelCase` (duplicate
    parameter names — a SyntaxError) and assigns to `lowerCamelCase_` while reading
    `vocab_size`, `d_model`, etc. — the signature/locals look mangled by obfuscation.
    """
    lowercase : List[str] ='mvp'
    # Keys excluded from serialization at inference time.
    lowercase : List[str] =['past_key_values']
    # Canonical attribute aliases used by the base config class.
    lowercase : Dict ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self, lowerCAmelCase=50_267, lowerCAmelCase=1_024, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase="gelu", lowerCAmelCase=1_024, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase=True, lowerCAmelCase=2, lowerCAmelCase=2, lowerCAmelCase=False, lowerCAmelCase=100, lowerCAmelCase=800, **lowerCAmelCase, ):
        """Store model hyperparameters and forward the special-token ids to the base class."""
        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =max_position_embeddings
        lowerCamelCase_ =d_model
        lowerCamelCase_ =encoder_ffn_dim
        lowerCamelCase_ =encoder_layers
        lowerCamelCase_ =encoder_attention_heads
        lowerCamelCase_ =decoder_ffn_dim
        lowerCamelCase_ =decoder_layers
        lowerCamelCase_ =decoder_attention_heads
        lowerCamelCase_ =dropout
        lowerCamelCase_ =attention_dropout
        lowerCamelCase_ =activation_dropout
        lowerCamelCase_ =activation_function
        lowerCamelCase_ =init_std
        lowerCamelCase_ =encoder_layerdrop
        lowerCamelCase_ =decoder_layerdrop
        lowerCamelCase_ =classifier_dropout
        lowerCamelCase_ =use_cache
        lowerCamelCase_ =encoder_layers
        lowerCamelCase_ =scale_embedding  # scale factor will be sqrt(d_model) if True
        lowerCamelCase_ =use_prompt
        lowerCamelCase_ =prompt_length
        lowerCamelCase_ =prompt_mid_dim
        super().__init__(
            pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, is_encoder_decoder=lowerCAmelCase, decoder_start_token_id=lowerCAmelCase, forced_eos_token_id=lowerCAmelCase, **lowerCAmelCase, )
        # Legacy kwarg: map force_bos_token_to_be_generated onto forced_bos_token_id with a warning.
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''', lowerCAmelCase ):
            lowerCamelCase_ =self.bos_token_id
            warnings.warn(
                f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                '''The config can simply be saved and uploaded again to be fixed.''' )
| 676 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the Pix2Struct package: names are only imported when accessed.
# NOTE(review): the structure dict is bound to `a_` but `_LazyModule` below references
# `_import_structure`, and the optional branches rebind `a_` instead of adding keys —
# the names appear mangled by obfuscation; verify before use.
a_ : Optional[int] = {
    """configuration_pix2struct""": [
        """PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """Pix2StructConfig""",
        """Pix2StructTextConfig""",
        """Pix2StructVisionConfig""",
    ],
    """processing_pix2struct""": ["""Pix2StructProcessor"""],
}
# Vision-only symbols are registered only when the vision extra is installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : int = ["""Pix2StructImageProcessor"""]
# Torch-only symbols are registered only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Any = [
        """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Pix2StructPreTrainedModel""",
        """Pix2StructForConditionalGeneration""",
        """Pix2StructVisionModel""",
        """Pix2StructTextModel""",
    ]
# During static type checking, import everything eagerly so analyzers see real symbols.
if TYPE_CHECKING:
    from .configuration_pixastruct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PixaStructConfig,
        PixaStructTextConfig,
        PixaStructVisionConfig,
    )
    from .processing_pixastruct import PixaStructProcessor
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pixastruct import PixaStructImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pixastruct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            PixaStructForConditionalGeneration,
            PixaStructPreTrainedModel,
            PixaStructTextModel,
            PixaStructVisionModel,
        )
else:
    # At runtime, replace this module with a lazy proxy.
    import sys
    a_ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 676 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : str = {"""vocab_file""": """spiece.model"""}
a_ : Optional[int] = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
a_ : List[Any] = {"""bert_for_seq_generation""": 5_12}
class __UpperCamelCase ( lowerCamelCase__ ):
    """SentencePiece-based tokenizer for the BERT-for-seq-generation checkpoint.

    NOTE(review): `__init__` declares several parameters with the same name
    (`lowerCAmelCase` — a SyntaxError) and assigns locals to `lowerCamelCase_` while
    later code reads `sp_model_kwargs`, `vocab_file`, etc. — identifiers appear mangled
    by obfuscation; restore before use.
    """
    lowercase : Optional[int] =VOCAB_FILES_NAMES
    lowercase : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
    lowercase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase : List[int] =[]
    lowercase : str =['input_ids', 'attention_mask']
    def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<::::>", lowerCAmelCase = None, **lowerCAmelCase, ):
        """Load the SentencePiece model from the vocab file and register special tokens."""
        lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, pad_token=lowerCAmelCase, sep_token=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, )
        lowerCamelCase_ =vocab_file
        lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowerCAmelCase )
    @property
    def lowercase__ ( self ):
        """Vocabulary size as reported by the SentencePiece model."""
        return self.sp_model.get_piece_size()
    def lowercase__ ( self ):
        """Return the full token->id vocabulary, including added tokens."""
        lowerCamelCase_ ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        """Drop the (unpicklable) SentencePiece processor before pickling."""
        lowerCamelCase_ =self.__dict__.copy()
        lowerCamelCase_ =None
        return state
    def __setstate__( self, lowerCAmelCase ):
        """Restore state and reload the SentencePiece model from the vocab file."""
        lowerCamelCase_ =d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs''' ):
            lowerCamelCase_ ={}
        lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def lowercase__ ( self, lowerCAmelCase ):
        """Tokenize text into SentencePiece pieces."""
        return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase )
    def lowercase__ ( self, lowerCAmelCase ):
        """Convert a token (piece) to its integer id."""
        return self.sp_model.piece_to_id(lowerCAmelCase )
    def lowercase__ ( self, lowerCAmelCase ):
        """Convert an integer id back to its token (piece)."""
        lowerCamelCase_ =self.sp_model.IdToPiece(lowerCAmelCase )
        return token
    def lowercase__ ( self, lowerCAmelCase ):
        """Join pieces back into a string, decoding around special tokens separately."""
        lowerCamelCase_ =[]
        lowerCamelCase_ =''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(lowerCAmelCase ) + token
                lowerCamelCase_ =[]
            else:
                current_sub_tokens.append(lowerCAmelCase )
        out_string += self.sp_model.decode(lowerCAmelCase )
        return out_string.strip()
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """Write the SentencePiece model into ``save_directory`` (copy or serialize) and return its path."""
        if not os.path.isdir(lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowerCamelCase_ =os.path.join(
            lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowerCAmelCase, '''wb''' ) as fi:
                lowerCamelCase_ =self.sp_model.serialized_model_proto()
                fi.write(lowerCAmelCase )
        return (out_vocab_file,)
| 676 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
a_ : List[str] = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class __UpperCamelCase ( lowerCamelCase__ ):
    """Configuration class for the BEiT vision model.

    NOTE(review): `__init__` declares every parameter as `lowerCAmelCase` (duplicate
    parameter names — a SyntaxError) and assigns to `lowerCamelCase_` while reading
    `vocab_size`, `hidden_size`, etc. — identifiers appear mangled by obfuscation.
    """
    lowercase : str ='beit'
    def __init__( self, lowerCAmelCase=8_192, lowerCAmelCase=768, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3_072, lowerCAmelCase="gelu", lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-12, lowerCAmelCase=224, lowerCAmelCase=16, lowerCAmelCase=3, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=True, lowerCAmelCase=[3, 5, 7, 11], lowerCAmelCase=[1, 2, 3, 6], lowerCAmelCase=True, lowerCAmelCase=0.4, lowerCAmelCase=256, lowerCAmelCase=1, lowerCAmelCase=False, lowerCAmelCase=255, **lowerCAmelCase, ):
        """Store transformer, patch-embedding and segmentation-head hyperparameters."""
        super().__init__(**lowerCAmelCase )
        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =hidden_dropout_prob
        lowerCamelCase_ =attention_probs_dropout_prob
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =image_size
        lowerCamelCase_ =patch_size
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =use_mask_token
        lowerCamelCase_ =use_absolute_position_embeddings
        lowerCamelCase_ =use_relative_position_bias
        lowerCamelCase_ =use_shared_relative_position_bias
        lowerCamelCase_ =layer_scale_init_value
        lowerCamelCase_ =drop_path_rate
        lowerCamelCase_ =use_mean_pooling
        # decode head attributes (semantic segmentation)
        lowerCamelCase_ =out_indices
        lowerCamelCase_ =pool_scales
        # auxiliary head attributes (semantic segmentation)
        lowerCamelCase_ =use_auxiliary_head
        lowerCamelCase_ =auxiliary_loss_weight
        lowerCamelCase_ =auxiliary_channels
        lowerCamelCase_ =auxiliary_num_convs
        lowerCamelCase_ =auxiliary_concat_input
        lowerCamelCase_ =semantic_loss_ignore_index
class __UpperCamelCase ( lowerCamelCase__ ):
    """ONNX export configuration for BEiT."""
    # Minimum ONNX opset/transformers version supported by this export config.
    lowercase : List[Any] =version.parse('1.11' )
    @property
    def lowercase__ ( self ):
        """Input spec: a single ``pixel_values`` tensor with dynamic batch/channels/height/width axes."""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def lowercase__ ( self ):
        """Absolute tolerance used when validating exported outputs against the PyTorch model."""
        return 1e-4
| 676 |
'''simple docstring'''
from collections.abc import Sequence
def a_ ( __snake_case : Sequence[float] , x : float ) -> float:
    """Evaluate a polynomial (coefficients lowest degree first) at ``x`` by direct summation.

    Computes sum(c_i * x**i) — O(n) terms, each with an O(i) exponentiation; see the
    Horner variant below for the cheaper scheme.

    Note: the original declared both parameters as ``__snake_case`` (a SyntaxError); the
    second is named ``x`` after the identifier the body reads.
    """
    return sum(c * (x**i) for i, c in enumerate(__snake_case ) )
def a_ ( __snake_case : Sequence[float] , x : float ) -> float:
    """Evaluate a polynomial (coefficients lowest degree first) at ``x`` with Horner's scheme.

    One multiply and one add per coefficient — no exponentiation.

    Note: the original declared both parameters as ``__snake_case`` (a SyntaxError) and
    assigned the accumulator to ``lowerCamelCase_`` while returning ``result``; both are
    restored here.
    """
    result = 0.0
    # Fold from the highest-degree coefficient down: result = (...((c_n)x + c_{n-1})x + ...) + c_0
    for coeff in reversed(__snake_case ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    # NOTE(review): the script reads `evaluate_poly`/`horner`/`poly`/`x`, but both functions
    # above are named `a_` and these constants are bound to `a_` too — names appear mangled
    # by obfuscation; restore before running as a script.
    a_ : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0)
    a_ : Tuple = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 676 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase ( lowerCamelCase__ ):
    """Unconditional DDIM image-generation pipeline (unet + DDIM scheduler).

    NOTE(review): locals are assigned to `lowerCamelCase_` while later lines read
    `scheduler`, `image_shape`, `image`, etc., and `__call__` declares several parameters
    with one name — identifiers appear mangled by obfuscation; restore before use.
    """
    def __init__( self, lowerCAmelCase, lowerCAmelCase ):
        """Register the unet and a DDIM-converted scheduler on the pipeline."""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        lowerCamelCase_ =DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=lowerCAmelCase, scheduler=lowerCAmelCase )
    @torch.no_grad()
    def __call__( self, lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = 0.0, lowerCAmelCase = 50, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, ):
        """Sample images from pure noise via the DDIM reverse process and return them as PIL/numpy."""
        # Sample size may be a single int (square images) or an explicit (h, w) pair.
        if isinstance(self.unet.config.sample_size, lowerCAmelCase ):
            lowerCamelCase_ =(
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            lowerCamelCase_ =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(lowerCAmelCase )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        lowerCamelCase_ =randn_tensor(lowerCAmelCase, generator=lowerCAmelCase, device=self.device, dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(lowerCAmelCase )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            lowerCamelCase_ =self.unet(lowerCAmelCase, lowerCAmelCase ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            lowerCamelCase_ =self.scheduler.step(
                lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, eta=lowerCAmelCase, use_clipped_model_output=lowerCAmelCase, generator=lowerCAmelCase ).prev_sample
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL output.
        lowerCamelCase_ =(image / 2 + 0.5).clamp(0, 1 )
        lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            lowerCamelCase_ =self.numpy_to_pil(lowerCAmelCase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowerCAmelCase )
| 676 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCamelCase ( lowerCamelCase__ ):
    """Processor bundling a CLIP image processor with an XLM-Roberta tokenizer.

    NOTE(review): locals are assigned to `lowerCamelCase_` while later lines read
    `image_processor`, `encoding`, `image_features`, etc. — identifiers appear mangled
    by obfuscation; restore before use.
    """
    lowercase : Optional[int] =['image_processor', 'tokenizer']
    lowercase : str ='CLIPImageProcessor'
    lowercase : Optional[Any] =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
        """Validate the pair and register them, honoring the deprecated `feature_extractor` kwarg."""
        lowerCamelCase_ =None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', lowerCAmelCase, )
            lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
        lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(lowerCAmelCase, lowerCAmelCase )
    def __call__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
        """Tokenize text and/or preprocess images; when both are given, attach pixel_values to the text encoding."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )
        if images is not None:
            lowerCamelCase_ =self.image_processor(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )
        if text is not None and images is not None:
            lowerCamelCase_ =image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowerCAmelCase ), tensor_type=lowerCAmelCase )
    def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
    def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
    @property
    def lowercase__ ( self ):
        """Union of tokenizer and image-processor model input names, order-preserving and de-duplicated."""
        lowerCamelCase_ =self.tokenizer.model_input_names
        lowerCamelCase_ =self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 676 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Tuple = logging.get_logger(__name__)
def a_ ( config : Optional[int] , base_model : Dict=False ) -> List[str]:
    """Build (old_name, new_name) pairs mapping timm DeiT weight names to HF DeiT names.

    Args:
        config: model config; only ``num_hidden_layers`` is read.
        base_model: when True, emit backbone-only names (strip the leading "deit" prefix
            and map the pre-logits head to a pooler); otherwise include the
            classification heads.

    Returns:
        List of (timm_name, hf_name) tuples.

    Note: the original declared both parameters as ``__snake_case`` (a SyntaxError) and
    appended to an undefined ``rename_keys``; both are restored here.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('''cls_token''', '''deit.embeddings.cls_token'''),
            ('''dist_token''', '''deit.embeddings.distillation_token'''),
            ('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
            ('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
            ('''pos_embed''', '''deit.embeddings.position_embeddings'''),
        ] )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('''norm.weight''', '''layernorm.weight'''),
                ('''norm.bias''', '''layernorm.bias'''),
                ('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
                ('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
            ] )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ('''norm.weight''', '''deit.layernorm.weight'''),
                ('''norm.bias''', '''deit.layernorm.bias'''),
                ('''head.weight''', '''cls_classifier.weight'''),
                ('''head.bias''', '''cls_classifier.bias'''),
                ('''head_dist.weight''', '''distillation_classifier.weight'''),
                ('''head_dist.bias''', '''distillation_classifier.bias'''),
            ] )
    return rename_keys
def a_ ( __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any=False ) -> List[Any]:
    """Split each timm fused qkv projection into separate query/key/value slices per layer.

    NOTE(review): all three parameters share one name (`__snake_case` — a SyntaxError),
    the body reads `config`, `base_model` and `state_dict`, and every slice is assigned
    to `lowerCamelCase_` instead of a state-dict key — the assignment targets (the HF
    q/k/v key names) were destroyed by obfuscation and must be restored from the
    upstream conversion script before this mutates `state_dict` as intended.
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            lowerCamelCase_ =''''''
        else:
            lowerCamelCase_ ='''deit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        lowerCamelCase_ =state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        lowerCamelCase_ =state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        lowerCamelCase_ =in_proj_weight[
            : config.hidden_size, :
        ]
        lowerCamelCase_ =in_proj_bias[: config.hidden_size]
        lowerCamelCase_ =in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        lowerCamelCase_ =in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        lowerCamelCase_ =in_proj_weight[
            -config.hidden_size :, :
        ]
        lowerCamelCase_ =in_proj_bias[-config.hidden_size :]
def a_ ( dct : Union[str, Any] , old : Any , new : int ) -> Tuple:
    """Rename one key of ``dct`` in place: pop ``old`` and re-insert its value under ``new``.

    Note: the original declared all three parameters with one name (a SyntaxError) and
    dropped the re-insertion target (`lowerCamelCase_ =val`); both are restored here.
    """
    val = dct.pop(old )
    dct[new] = val
def a_ ( ) -> Dict:
    """Download and return the standard COCO val2017 test image (two cats on a couch)
    used to sanity-check the converted model.

    Note: the original passed an undefined ``__snake_case`` as the URL and stream flag
    and returned an undefined ``im``; the locals are restored here. Requires network
    access plus the file-level ``requests`` and ``PIL.Image`` imports.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def a_ ( __snake_case : str , __snake_case : Any ) -> Tuple:
    """Convert a timm DeiT checkpoint to the HF format, verify logits on a test image,
    and save model + image processor to the dump folder.

    NOTE(review): both parameters share one name (`__snake_case` — a SyntaxError), the
    body reads `deit_name` / `pytorch_dump_folder_path`, and every local is assigned to
    `lowerCamelCase_` while later lines read `config`, `state_dict`, `model`, etc. —
    identifiers appear mangled by obfuscation; restore before running.
    """
    lowerCamelCase_ =DeiTConfig()
    # all deit models have fine-tuned heads
    lowerCamelCase_ =False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    lowerCamelCase_ =1000
    lowerCamelCase_ ='''huggingface/label-files'''
    lowerCamelCase_ ='''imagenet-1k-id2label.json'''
    lowerCamelCase_ =json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='''dataset''' ) , '''r''' ) )
    lowerCamelCase_ ={int(__snake_case ): v for k, v in idalabel.items()}
    lowerCamelCase_ =idalabel
    lowerCamelCase_ ={v: k for k, v in idalabel.items()}
    # Patch size and image size are encoded in the timm model name suffix.
    lowerCamelCase_ =int(deit_name[-6:-4] )
    lowerCamelCase_ =int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith('''tiny''' ):
        lowerCamelCase_ =192
        lowerCamelCase_ =768
        lowerCamelCase_ =12
        lowerCamelCase_ =3
    elif deit_name[9:].startswith('''small''' ):
        lowerCamelCase_ =384
        lowerCamelCase_ =1536
        lowerCamelCase_ =12
        lowerCamelCase_ =6
    if deit_name[9:].startswith('''base''' ):
        pass
    elif deit_name[4:].startswith('''large''' ):
        lowerCamelCase_ =1024
        lowerCamelCase_ =4096
        lowerCamelCase_ =24
        lowerCamelCase_ =16
    # load original model from timm
    lowerCamelCase_ =timm.create_model(__snake_case , pretrained=__snake_case )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    lowerCamelCase_ =timm_model.state_dict()
    lowerCamelCase_ =create_rename_keys(__snake_case , __snake_case )
    for src, dest in rename_keys:
        rename_key(__snake_case , __snake_case , __snake_case )
    read_in_q_k_v(__snake_case , __snake_case , __snake_case )
    # load HuggingFace model
    lowerCamelCase_ =DeiTForImageClassificationWithTeacher(__snake_case ).eval()
    model.load_state_dict(__snake_case )
    # Check outputs on an image, prepared by DeiTImageProcessor
    lowerCamelCase_ =int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    lowerCamelCase_ =DeiTImageProcessor(size=__snake_case , crop_size=config.image_size )
    lowerCamelCase_ =image_processor(images=prepare_img() , return_tensors='''pt''' )
    lowerCamelCase_ =encoding['''pixel_values''']
    lowerCamelCase_ =model(__snake_case )
    lowerCamelCase_ =timm_model(__snake_case )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(__snake_case , outputs.logits , atol=1e-3 )
    Path(__snake_case ).mkdir(exist_ok=__snake_case )
    print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__snake_case )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
    # NOTE(review): the parser is bound to `a_` but the following lines read `parser`,
    # `args` and `convert_deit_checkpoint` (the function above is named `a_`) — names
    # appear mangled by obfuscation; restore before running as a script.
    a_ : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--deit_name""",
        default="""vit_deit_base_distilled_patch16_224""",
        type=str,
        help="""Name of the DeiT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    a_ : List[str] = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 676 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
a_ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
    # Image-to-text generation pipeline (obfuscated identifiers).
    # NOTE(review): every method below except ``__init__``/``__call__`` shares the
    # obfuscated name ``lowercase__`` — in Python each later ``def`` shadows the
    # earlier ones, and several signatures repeat the parameter name
    # ``lowerCAmelCase``, which is a SyntaxError.  The originals were presumably
    # ``_sanitize_parameters`` / ``preprocess`` / ``_forward`` / ``postprocess``;
    # confirm against the upstream ImageToTextPipeline before relying on this.

    def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Initialize the pipeline, require the vision backend, and restrict the
        model to a vision-to-sequence architecture for the active framework."""
        super().__init__(*lowerCAmelCase, **lowerCAmelCase )
        requires_backends(self, '''vision''' )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None ):
        """Split call-time options (prompt / generate_kwargs / max_new_tokens)
        into preprocess and forward parameter dicts.

        Raises ValueError when ``max_new_tokens`` is given both directly and
        inside ``generate_kwargs``.
        """
        # NOTE(review): the repeated ``lowerCamelCase_ =`` lines bind one local;
        # the original populated ``preprocess_params`` / ``forward_kwargs``.
        lowerCamelCase_ ={}
        lowerCamelCase_ ={}
        if prompt is not None:
            lowerCamelCase_ =prompt
        if generate_kwargs is not None:
            lowerCamelCase_ =generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                lowerCamelCase_ ={}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
                    ''' please use only one''' )
            lowerCamelCase_ =max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__( self, lowerCAmelCase, **lowerCAmelCase ):
        """Run the pipeline on one image (or a batch); delegates to the base class."""
        return super().__call__(lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
        """Preprocess one image (and optional prompt) into model inputs, with
        model-type-specific handling for git / pix2struct / vision-encoder-decoder."""
        lowerCamelCase_ =load_image(lowerCAmelCase )
        if prompt is not None:
            if not isinstance(lowerCAmelCase, lowerCAmelCase ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(lowerCAmelCase )} - but expected a single string. '''
                    '''Note also that one single text can be provided for conditional image to text generation.''' )
            lowerCamelCase_ =self.model.config.model_type
            if model_type == "git":
                # git: prepend the CLS token to the tokenized prompt and feed it as input_ids.
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
                lowerCamelCase_ =self.tokenizer(text=lowerCAmelCase, add_special_tokens=lowerCAmelCase ).input_ids
                lowerCamelCase_ =[self.tokenizer.cls_token_id] + input_ids
                lowerCamelCase_ =torch.tensor(lowerCAmelCase ).unsqueeze(0 )
                model_inputs.update({'''input_ids''': input_ids} )
            elif model_type == "pix2struct":
                # pix2struct: the prompt is rendered into the image as header text.
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, header_text=lowerCAmelCase, return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
                lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework )
                model_inputs.update(lowerCAmelCase )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            # git expects input_ids=None (not absent) when no prompt is given.
            lowerCamelCase_ =None
        return model_inputs

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
        """Run generation on the preprocessed inputs and return the raw output ids."""
        # Degenerate batch of all-None input_ids (git without prompt) collapses to None.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['''input_ids'''], lowerCAmelCase )
            and all(x is None for x in model_inputs['''input_ids'''] )
        ):
            lowerCamelCase_ =None
        if generate_kwargs is None:
            lowerCamelCase_ ={}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        lowerCamelCase_ =model_inputs.pop(self.model.main_input_name )
        lowerCamelCase_ =self.model.generate(lowerCAmelCase, **lowerCAmelCase, **lowerCAmelCase )
        return model_outputs

    def lowercase__ ( self, lowerCAmelCase ):
        """Decode generated ids into a list of {'generated_text': str} records."""
        lowerCamelCase_ =[]
        for output_ids in model_outputs:
            lowerCamelCase_ ={
                '''generated_text''': self.tokenizer.decode(
                    lowerCAmelCase, skip_special_tokens=lowerCAmelCase, )
            }
            records.append(lowerCAmelCase )
        return records
| 676 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCamelCase__ ):
    # Dummy config declaring model type "new-model"; used by the auto-class
    # registration tests in this file.  NOTE(review): names are machine-obfuscated;
    # upstream this is ``NewModelConfig(BertConfig)`` with ``model_type = "new-model"``.
    lowercase : Union[str, Any] ='new-model'
if is_tf_available():

    class __UpperCamelCase ( lowerCamelCase__ ):
        # Dummy TF model bound to NewModelConfig, only defined when TF is present.
        # NOTE(review): obfuscated — upstream this is ``TFNewModel(TFBertModel)``
        # with ``config_class = NewModelConfig``.
        lowercase : Optional[int] =NewModelConfig
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
    # Smoke tests for the TFAuto* factory classes (obfuscated identifiers).
    # NOTE(review): every test method shares the obfuscated name ``lowercase__``
    # (later defs shadow earlier ones at class-creation time), the repeated
    # ``lowerCamelCase_ =`` assignments bind a single local, and many
    # ``lowerCAmelCase`` arguments are undefined at their use sites — the file is
    # machine-obfuscated; reconstruct against the upstream
    # tests/models/auto/test_modeling_tf_auto.py before trusting behavior.

    @slow
    def lowercase__ ( self ):
        """Load bert-base-cased via AutoConfig and TFAutoModel."""
        lowerCamelCase_ ='''bert-base-cased'''
        lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
        self.assertIsNotNone(lowerCAmelCase )
        self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
        lowerCamelCase_ =TFAutoModel.from_pretrained(lowerCAmelCase )
        self.assertIsNotNone(lowerCAmelCase )
        self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )

    @slow
    def lowercase__ ( self ):
        """Load bert-base-cased via TFAutoModelForPreTraining."""
        lowerCamelCase_ ='''bert-base-cased'''
        lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
        self.assertIsNotNone(lowerCAmelCase )
        self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
        lowerCamelCase_ =TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase )
        self.assertIsNotNone(lowerCAmelCase )
        self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )

    @slow
    def lowercase__ ( self ):
        """Load a GPT-2 checkpoint via TFAutoModelForCausalLM (with loading info)."""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
            lowerCamelCase_ =TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase )
            lowerCamelCase_, lowerCamelCase_ =TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase, output_loading_info=lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )

    @slow
    def lowercase__ ( self ):
        """Load a BERT checkpoint via the legacy TFAutoModelWithLMHead."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
            lowerCamelCase_ =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )

    @slow
    def lowercase__ ( self ):
        """Load a BERT checkpoint via TFAutoModelForMaskedLM (with loading info)."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
            lowerCamelCase_ =TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase )
            lowerCamelCase_, lowerCamelCase_ =TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase, output_loading_info=lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )

    @slow
    def lowercase__ ( self ):
        """Load a T5 checkpoint via TFAutoModelForSeq2SeqLM (with loading info)."""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
            lowerCamelCase_ =TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase )
            lowerCamelCase_, lowerCamelCase_ =TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase, output_loading_info=lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )

    @slow
    def lowercase__ ( self ):
        """Load bert-base-uncased via TFAutoModelForSequenceClassification."""
        for model_name in ["bert-base-uncased"]:
            lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
            lowerCamelCase_ =TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )

    @slow
    def lowercase__ ( self ):
        """Load bert-base-uncased via TFAutoModelForQuestionAnswering."""
        for model_name in ["bert-base-uncased"]:
            lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
            lowerCamelCase_ =TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )

    @slow
    @require_tensorflow_probability
    def lowercase__ ( self ):
        """Load a TAPAS checkpoint via TFAutoModelForTableQuestionAnswering."""
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
            lowerCamelCase_ =TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCAmelCase )
            lowerCamelCase_, lowerCamelCase_ =TFAutoModelForTableQuestionAnswering.from_pretrained(
                lowerCAmelCase, output_loading_info=lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )

    def lowercase__ ( self ):
        """Check parameter counting on a tiny LM-head model (total and trainable)."""
        lowerCamelCase_ =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase )
        self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
        self.assertEqual(model.num_parameters(), 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase ), 14_410 )

    def lowercase__ ( self ):
        """Same parameter-count check on the second tiny LM-head fixture."""
        lowerCamelCase_ =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase )
        self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
        self.assertEqual(model.num_parameters(), 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase ), 14_410 )

    def lowercase__ ( self ):
        """A config whose architectures list names FunnelBaseModel should load as
        that architecture both from_config and after a save/reload round-trip."""
        lowerCamelCase_ =TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
        self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
        lowerCamelCase_ =copy.deepcopy(model.config )
        lowerCamelCase_ =['''FunnelBaseModel''']
        lowerCamelCase_ =TFAutoModel.from_config(lowerCAmelCase )
        self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(lowerCAmelCase )
            lowerCamelCase_ =TFAutoModel.from_pretrained(lowerCAmelCase )
            self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )

    def lowercase__ ( self ):
        """Register a custom config/model pair with every TFAuto* class, exercise
        from_config / save / from_pretrained, then clean the registries up."""
        try:
            AutoConfig.register('''new-model''', lowerCAmelCase )
            lowerCamelCase_ =[
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(lowerCAmelCase ):
                        auto_class.register(lowerCAmelCase, lowerCAmelCase )
                    auto_class.register(lowerCAmelCase, lowerCAmelCase )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(lowerCAmelCase ):
                        auto_class.register(lowerCAmelCase, lowerCAmelCase )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    lowerCamelCase_ =BertModelTester(self ).get_config()
                    lowerCamelCase_ =NewModelConfig(**tiny_config.to_dict() )
                    lowerCamelCase_ =auto_class.from_config(lowerCAmelCase )
                    self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(lowerCAmelCase )
                        lowerCamelCase_ =auto_class.from_pretrained(lowerCAmelCase )
                        self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
        finally:
            # Always unregister the test entries so other tests see clean mappings.
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def lowercase__ ( self ):
        """An invalid model identifier should raise a descriptive error."""
        with self.assertRaisesRegex(
            lowerCAmelCase, '''bert-base is not a local folder and is not a valid model identifier''' ):
            lowerCamelCase_ =TFAutoModel.from_pretrained('''bert-base''' )

    def lowercase__ ( self ):
        """An invalid revision should raise a descriptive error."""
        with self.assertRaisesRegex(
            lowerCAmelCase, R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            lowerCamelCase_ =TFAutoModel.from_pretrained(lowerCAmelCase, revision='''aaaaaa''' )

    def lowercase__ ( self ):
        """A repo with a config but no model weights should raise a descriptive error."""
        with self.assertRaisesRegex(
            lowerCAmelCase, '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''', ):
            lowerCamelCase_ =TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )

    def lowercase__ ( self ):
        """A PyTorch-only repo should tell the user to pass from_pt=True."""
        with self.assertRaisesRegex(lowerCAmelCase, '''Use `from_pt=True` to load this model''' ):
            lowerCamelCase_ =TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )

    def lowercase__ ( self ):
        """A second from_pretrained on a cached model should only issue one HEAD
        request and no GETs, for both plain and sharded checkpoints."""
        lowerCamelCase_ =TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        with RequestCounter() as counter:
            lowerCamelCase_ =TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            self.assertEqual(counter.get_request_count, 0 )
            self.assertEqual(counter.head_request_count, 1 )
            self.assertEqual(counter.other_request_count, 0 )
        # With a sharded checkpoint
        lowerCamelCase_ =TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
        with RequestCounter() as counter:
            lowerCamelCase_ =TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
            self.assertEqual(counter.get_request_count, 0 )
            self.assertEqual(counter.head_request_count, 1 )
            self.assertEqual(counter.other_request_count, 0 )
| 676 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( tf_checkpoint_path : str , bert_config_file : str , pytorch_dump_path : str ) -> None:
    """Convert a TensorFlow BERT checkpoint to a PyTorch ``BertForPreTraining`` weight file.

    Fix: the original signature declared the parameter name ``__snake_case``
    three times, which is a SyntaxError in Python; the parameters are now
    distinct and named after the CLI flags that feed them (callers pass them
    positionally, so the interface is unchanged).

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint to read.
        bert_config_file: JSON config file describing the BERT architecture.
        pytorch_dump_path: Where to write the converted PyTorch state dict.
    """
    # Initialise PyTorch model from the JSON architecture description.
    config = BertConfig.from_json_file(tf_checkpoint_path if False else bert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint into the freshly built model.
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    # CLI entry point for the TF→PyTorch BERT conversion above.
    # NOTE(review): obfuscated names — the parser is bound to ``a_`` but used as
    # ``parser``, and ``convert_tf_checkpoint_to_pytorch`` is not defined in this
    # file (the conversion function was renamed).  Reconstruct before running.
    a_ : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--bert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    a_ : Optional[Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 676 | 1 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ : Any = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __UpperCamelCase ( unittest.TestCase ):
    # Runs the repository's doctests over selected source/doc directories.
    # NOTE(review): obfuscated identifiers — the helper's ``lowerCAmelCase``
    # parameter name is repeated (a SyntaxError as written) and the repeated
    # ``lowerCamelCase_ =`` assignments bind one local; upstream this is
    # ``analyze_directory(directory, identifier, ignore_files, n_identifier,
    # only_modules)``.  All test methods share the name ``lowercase__`` and
    # shadow each other.

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, ):
        """Collect files in a directory (filtered by identifier / n_identifier /
        ignore list) and run their doctests, either as imported modules or as
        plain doctest files, asserting zero failures."""
        lowerCamelCase_ =[file for file in os.listdir(lowerCAmelCase ) if os.path.isfile(os.path.join(lowerCAmelCase, lowerCAmelCase ) )]
        if identifier is not None:
            lowerCamelCase_ =[file for file in files if identifier in file]
        if n_identifier is not None:
            # n_identifier may be one exclusion substring or a list of them.
            if isinstance(lowerCAmelCase, lowerCAmelCase ):
                for n_ in n_identifier:
                    lowerCamelCase_ =[file for file in files if n_ not in file]
            else:
                lowerCamelCase_ =[file for file in files if n_identifier not in file]
        lowerCamelCase_ =ignore_files or []
        ignore_files.append('''__init__.py''' )
        lowerCamelCase_ =[file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('''Testing''', lowerCAmelCase )
            if only_modules:
                # Import the file as a module and run its docstring examples.
                lowerCamelCase_ =file.split('''.''' )[0]
                try:
                    lowerCamelCase_ =getattr(lowerCAmelCase, lowerCAmelCase )
                    lowerCamelCase_ =doctest.DocTestSuite(lowerCAmelCase )
                    lowerCamelCase_ =unittest.TextTestRunner().run(lowerCAmelCase )
                    self.assertIs(len(result.failures ), 0 )
                except AttributeError:
                    logger.info(f'''{module_identifier} is not a module.''' )
            else:
                # Treat the file as a doctest text file.
                lowerCamelCase_ =doctest.testfile(str('''..''' / directory / file ), optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed, 0 )

    def lowercase__ ( self ):
        """Doctest the modeling files (skipping the CTRL exceptions)."""
        lowerCamelCase_ =Path('''src/transformers''' )
        lowerCamelCase_ ='''modeling'''
        lowerCamelCase_ =[
            '''modeling_ctrl.py''',
            '''modeling_tf_ctrl.py''',
        ]
        self.analyze_directory(lowerCAmelCase, identifier=lowerCAmelCase, ignore_files=lowerCAmelCase )

    def lowercase__ ( self ):
        """Doctest the tokenization files."""
        lowerCamelCase_ =Path('''src/transformers''' )
        lowerCamelCase_ ='''tokenization'''
        self.analyze_directory(lowerCAmelCase, identifier=lowerCAmelCase )

    def lowercase__ ( self ):
        """Doctest the configuration files."""
        lowerCamelCase_ =Path('''src/transformers''' )
        lowerCamelCase_ ='''configuration'''
        self.analyze_directory(lowerCAmelCase, identifier=lowerCAmelCase )

    def lowercase__ ( self ):
        """Doctest everything EXCEPT configuration/modeling/tokenization files."""
        lowerCamelCase_ =Path('''src/transformers''' )
        lowerCamelCase_ =['''configuration''', '''modeling''', '''tokenization''']
        self.analyze_directory(lowerCAmelCase, n_identifier=lowerCAmelCase )

    def lowercase__ ( self ):
        """Doctest the documentation sources as plain doctest files."""
        lowerCamelCase_ =Path('''docs/source''' )
        lowerCamelCase_ =['''favicon.ico''']
        self.analyze_directory(lowerCAmelCase, ignore_files=lowerCAmelCase, only_modules=lowerCAmelCase )
| 676 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for the AltCLIP configuration classes.
a_ : Union[str, Any] = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL.
# NOTE(review): both constants are bound to obfuscated names (the second
# shadows nothing upstream: they were ``logger`` and the pretrained-config map).
a_ : Optional[int] = {
    """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __UpperCamelCase ( lowerCamelCase__ ):
    # Configuration for the AltCLIP text tower (XLM-R-like encoder plus a
    # projection head of size ``project_dim``).
    # NOTE(review): obfuscated — ``__init__`` repeats the parameter name
    # ``lowerCAmelCase`` (a SyntaxError as written) and every attribute
    # assignment binds the same local ``lowerCamelCase_``; upstream each line is
    # ``self.<attr> = <arg>`` for vocab_size, hidden_size, etc.
    lowercase : Optional[Any] ='altclip_text_model'

    def __init__( self, lowerCAmelCase=250_002, lowerCAmelCase=1_024, lowerCAmelCase=24, lowerCAmelCase=16, lowerCAmelCase=4_096, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=514, lowerCAmelCase=1, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-05, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase="absolute", lowerCAmelCase=True, lowerCAmelCase=768, **lowerCAmelCase, ):
        """Store text-encoder hyperparameters; special-token ids go to the base class."""
        super().__init__(pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase )
        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =hidden_dropout_prob
        lowerCamelCase_ =attention_probs_dropout_prob
        lowerCamelCase_ =max_position_embeddings
        lowerCamelCase_ =type_vocab_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =initializer_factor
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =position_embedding_type
        lowerCamelCase_ =use_cache
        lowerCamelCase_ =project_dim
class __UpperCamelCase ( lowerCamelCase__ ):
    # Configuration for the AltCLIP vision tower (CLIP-style ViT).
    # NOTE(review): obfuscated — repeated ``lowerCAmelCase`` parameter names and
    # single-local attribute assignments, as in the text config above.
    lowercase : Dict ='altclip_vision_model'

    def __init__( self, lowerCAmelCase=768, lowerCAmelCase=3_072, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3, lowerCAmelCase=224, lowerCAmelCase=32, lowerCAmelCase="quick_gelu", lowerCAmelCase=1e-5, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1.0, **lowerCAmelCase, ):
        """Store vision-encoder hyperparameters (hidden size, ViT patch/image size, etc.)."""
        super().__init__(**lowerCAmelCase )
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =projection_dim
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =patch_size
        lowerCamelCase_ =image_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =initializer_factor
        lowerCamelCase_ =attention_dropout
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =hidden_act

    @classmethod
    def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ):
        """Build the vision config from a pretrained name/path, unwrapping the
        ``vision_config`` sub-dict when the checkpoint is a full AltCLIP config."""
        cls._set_token_in_kwargs(lowerCAmelCase )
        lowerCamelCase_, lowerCamelCase_ =cls.get_config_dict(lowerCAmelCase, **lowerCAmelCase )
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('''model_type''' ) == "altclip":
            lowerCamelCase_ =config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(lowerCAmelCase, **lowerCAmelCase )
class __UpperCamelCase ( lowerCamelCase__ ):
    # Composite AltCLIP configuration holding a text config and a vision config.
    # NOTE(review): obfuscated identifiers throughout (single-local assignments,
    # duplicate ``lowerCAmelCase`` defaults); the logic mirrors upstream
    # ``AltCLIPConfig.__init__`` which reconciles the legacy
    # ``*_config_dict`` kwargs with the ``*_config`` arguments.
    lowercase : Dict ='altclip'
    lowercase : str =True

    def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=768, lowerCAmelCase=2.6_5_9_2, **lowerCAmelCase ):
        """Merge optional ``text_config_dict``/``vision_config_dict`` kwargs into
        the text/vision configs (warning on conflicts), then build both sub-configs."""
        lowerCamelCase_ =kwargs.pop('''text_config_dict''', lowerCAmelCase )
        lowerCamelCase_ =kwargs.pop('''vision_config_dict''', lowerCAmelCase )
        super().__init__(**lowerCAmelCase )
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                lowerCamelCase_ ={}
            # This is the complete result when using `text_config_dict`.
            lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        lowerCamelCase_ =(
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        lowerCamelCase_ =(
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(lowerCAmelCase )
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )
        if vision_config_dict is not None:
            if vision_config is None:
                lowerCamelCase_ ={}
            # This is the complete result when using `vision_config_dict`.
            lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                lowerCamelCase_ ={
                    str(lowerCAmelCase ): value for key, value in _vision_config_dict['''id2label'''].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        lowerCamelCase_ =(
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        lowerCamelCase_ =(
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(lowerCAmelCase )
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )
        if text_config is None:
            lowerCamelCase_ ={}
            logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
        if vision_config is None:
            lowerCamelCase_ ={}
            logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' )
        lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase )
        lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase )
        lowerCamelCase_ =projection_dim
        lowerCamelCase_ =logit_scale_init_value
        lowerCamelCase_ =1.0

    @classmethod
    def lowercase__ ( cls, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ):
        """Alternate constructor from already-built text and vision sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCAmelCase )

    def lowercase__ ( self ):
        """Serialize to a dict, expanding the nested sub-configs and model_type."""
        lowerCamelCase_ =copy.deepcopy(self.__dict__ )
        lowerCamelCase_ =self.text_config.to_dict()
        lowerCamelCase_ =self.vision_config.to_dict()
        lowerCamelCase_ =self.__class__.model_type
        return output
| 676 | 1 |
'''simple docstring'''
def a_ ( __snake_case : list ) -> list:
    """Sort ``__snake_case`` in place with odd-even transposition sort and return it.

    Fixes: the original never defined ``arr_size`` (its first assignment was
    obfuscated into a throwaway local, causing a NameError) and the swap
    assigned both values to a single name instead of swapping the elements.

    Args:
        __snake_case: List of mutually comparable items; sorted ascending in place.

    Returns:
        The same list object, sorted ascending.
    """
    arr_size = len(__snake_case )
    for pass_index in range(arr_size ):
        # Even-numbered passes compare pairs starting at index 0, odd passes at 1.
        for i in range(pass_index % 2 , arr_size - 1 , 2 ):
            if __snake_case[i + 1] < __snake_case[i]:
                __snake_case[i], __snake_case[i + 1] = __snake_case[i + 1], __snake_case[i]
    return __snake_case
if __name__ == "__main__":
    # Demo: sort a reversed 10..1 list and print before/after.
    # NOTE(review): obfuscated — the list is bound to ``a_`` (shadowing the sort
    # function above) while the print references undefined names ``arr`` and
    # ``odd_even_transposition``; reconstruct names before running.
    a_ : Optional[int] = list(range(10, 0, -1))
    print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 676 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCamelCase__ ):
    def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=2, lowerCAmelCase=99, lowerCAmelCase=0, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase="last", lowerCAmelCase=None, lowerCAmelCase=None, ):
        """Store the Flaubert model-tester hyperparameters on the instance.

        NOTE(review): machine-obfuscated — the parameter name ``lowerCAmelCase``
        repeats (a SyntaxError as written) and every assignment binds the single
        local ``lowerCamelCase_``; upstream each line is ``self.<name> = <arg>``
        (parent, batch_size, seq_length, ... scope, matching the right-hand sides).
        """
        lowerCamelCase_ =parent
        lowerCamelCase_ =batch_size
        lowerCamelCase_ =seq_length
        lowerCamelCase_ =is_training
        lowerCamelCase_ =use_input_lengths
        lowerCamelCase_ =use_token_type_ids
        lowerCamelCase_ =use_labels
        lowerCamelCase_ =gelu_activation
        lowerCamelCase_ =sinusoidal_embeddings
        lowerCamelCase_ =causal
        lowerCamelCase_ =asm
        lowerCamelCase_ =n_langs
        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =n_special
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =hidden_dropout_prob
        lowerCamelCase_ =attention_probs_dropout_prob
        lowerCamelCase_ =max_position_embeddings
        lowerCamelCase_ =type_vocab_size
        lowerCamelCase_ =type_sequence_label_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =num_labels
        lowerCamelCase_ =num_choices
        lowerCamelCase_ =summary_type
        lowerCamelCase_ =use_proj
        lowerCamelCase_ =scope
    def lowercase__ ( self ):
        """Build the config plus random input tensors (ids, masks, optional
        lengths/langs/labels) used by the create_and_check_* methods below."""
        lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase_ =None
        if self.use_input_lengths:
            lowerCamelCase_ =(
                ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
            ) # small variation of seq_length
        lowerCamelCase_ =None
        if self.use_token_type_ids:
            lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs )
        lowerCamelCase_ =None
        lowerCamelCase_ =None
        lowerCamelCase_ =None
        if self.use_labels:
            lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size )
            lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float()
            lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices )
        lowerCamelCase_ =self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def lowercase__ ( self ):
        """Assemble a FlaubertConfig from the hyperparameters stored in __init__."""
        return FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Run the bare FlaubertModel with/without lengths and langs and check the
        last_hidden_state shape.  NOTE(review): obfuscated — parameter name
        repeats (SyntaxError as written); upstream these are the tuple returned
        by prepare_config_and_inputs."""
        lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase )
        lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase )
        lowerCamelCase_ =model(lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Check FlaubertWithLMHeadModel: scalar loss, (batch, seq, vocab) logits.

        NOTE(review): duplicate obfuscated parameter names — SyntaxError as written.
        """
        lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
        self.parent.assertEqual(result.loss.shape, () )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Check FlaubertForQuestionAnsweringSimple: start/end logits are (batch, seq).

        NOTE(review): duplicate obfuscated parameter names — SyntaxError as written.
        """
        lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase )
        lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Check FlaubertForQuestionAnswering (beam-search QA head): runs the model
        with and without labels and verifies loss plus top-k start/end outputs.

        NOTE(review): duplicate obfuscated parameter names — SyntaxError as
        written; `result_with_labels` / `result` below are unresolved.
        """
        lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase )
        lowerCamelCase_ =model(
            lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, )
        lowerCamelCase_ =model(
            lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, )
        ((lowerCamelCase_), ) =result_with_labels.to_tuple()
        lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
        ((lowerCamelCase_), ) =result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, () )
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Check FlaubertForSequenceClassification: scalar loss, (batch, num_classes) logits.

        NOTE(review): duplicate obfuscated parameter names — SyntaxError as written.
        """
        lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase )
        lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase )
        self.parent.assertEqual(result.loss.shape, () )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Check FlaubertForTokenClassification: logits are (batch, seq, num_labels).

        NOTE(review): duplicate obfuscated parameter names — SyntaxError as written.
        """
        lowerCamelCase_ =self.num_labels
        lowerCamelCase_ =FlaubertForTokenClassification(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Check FlaubertForMultipleChoice: inputs are tiled to (batch, num_choices, seq)
        and logits come back as (batch, num_choices).

        NOTE(review): duplicate obfuscated parameter names — SyntaxError as
        written; `input_ids` / `token_type_ids` / `input_mask` are unresolved.
        """
        lowerCamelCase_ =self.num_choices
        lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        lowerCamelCase_ =model(
            lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    def lowercase__ ( self ):
        """Unpack prepare_config_and_inputs() into the kwargs dict used by common tests.

        NOTE(review): the 9-way tuple unpack assigns every element to the same
        obfuscated name, so `config_and_inputs`, `input_ids`, `token_type_ids`,
        `input_lengths` and `input_mask` below are unresolved.
        """
        lowerCamelCase_ =self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ),
        ) =config_and_inputs
        lowerCamelCase_ ={
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''lengths''': input_lengths,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Common + pipeline test suite for the Flaubert PyTorch models.

    NOTE(review): obfuscation renamed the class and its bases; this appears to
    be the usual `FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin,
    unittest.TestCase)` — confirm against the unobfuscated original.
    """

    # Model classes exercised by the common tests (empty when torch is absent).
    lowercase : List[Any] =(
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline task name -> model class, used by the pipeline test mixin.
    lowercase : Tuple =(
        {
            'feature-extraction': FlaubertModel,
            'fill-mask': FlaubertWithLMHeadModel,
            'question-answering': FlaubertForQuestionAnsweringSimple,
            'text-classification': FlaubertForSequenceClassification,
            'token-classification': FlaubertForTokenClassification,
            'zero-shot': FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """Return True for pipeline test combinations that should be skipped.

        NOTE(review): duplicate obfuscated parameter names — SyntaxError as
        written; `pipeline_test_casse_name` / `tokenizer_name` are unresolved.
        """
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('''Fast''' )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ):
        """Extend the inherited _prepare_for_class: the beam-search QA model also
        needs zeroed `cls_index`/`is_impossible` labels (both assignments below
        were collapsed by obfuscation)."""
        lowerCamelCase_ =super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                lowerCamelCase_ =torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
                lowerCamelCase_ =torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
        return inputs_dict
    def lowercase__ ( self ):
        """Set up the model tester and the config tester."""
        lowerCamelCase_ =FlaubertModelTester(self )
        lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, emb_dim=37 )
    def lowercase__ ( self ):
        """Run the shared FlaubertConfig sanity checks."""
        self.config_tester.run_common_tests()
    def lowercase__ ( self ):
        """Exercise the base-model shape checks."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Exercise the LM-head checks."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Exercise the simple-QA checks."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Exercise the beam-search-QA checks."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Exercise the sequence-classification checks."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Exercise the token-classification checks."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase )
    def lowercase__ ( self ):
        """Exercise the multiple-choice checks."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase )
    @slow
    def lowercase__ ( self ):
        """Smoke-test from_pretrained on the first public Flaubert checkpoint."""
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
    @slow
    @require_torch_gpu
    def lowercase__ ( self ):
        """Trace each model with torch.jit, round-trip through disk, and rerun it.

        NOTE(review): the early `return` (not `continue`) on FlaubertForMultipleChoice
        aborts the whole loop — behavior inherited from the upstream test.
        """
        lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            lowerCamelCase_ =True
            lowerCamelCase_ =model_class(config=lowerCAmelCase )
            lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase )
            lowerCamelCase_ =torch.jit.trace(
                lowerCAmelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, '''traced_model.pt''' ) )
                lowerCamelCase_ =torch.jit.load(os.path.join(lowerCAmelCase, '''traced_model.pt''' ), map_location=lowerCAmelCase )
                loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase ), inputs_dict['''attention_mask'''].to(lowerCAmelCase ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    """Integration test: run the real `flaubert_base_cased` checkpoint on a short
    token sequence and compare a 3x3 output slice against recorded values."""

    @slow
    def lowercase__ ( self ):
        """Check output shape (1, 11, 768) and a reference slice of the hidden states."""
        lowerCamelCase_ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
        lowerCamelCase_ =torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        with torch.no_grad():
            lowerCamelCase_ =model(lowerCAmelCase )[0]
        lowerCamelCase_ =torch.Size((1, 11, 768) )
        self.assertEqual(output.shape, lowerCAmelCase )
        lowerCamelCase_ =torch.tensor(
            [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=1e-4 ) )
| 676 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def a_ ( __snake_case : list[float] ) -> np.ndarray:
    """Apply the ReLU activation element-wise.

    Args:
        __snake_case: sequence of numbers (or any array-like).

    Returns:
        np.ndarray with every negative entry clamped to 0.
        (Fixed: the original annotated the return type as ``str``.)
    """
    # np.maximum broadcasts the scalar 0 against the input array.
    return np.maximum(0 , __snake_case )
if __name__ == "__main__":
    # Fixed: the module defines `a_`, not `relu` — calling `relu` raised NameError.
    print(np.array(a_([-1, 0, 5])))  # --> [0 0 5]
| 676 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
a_ : List[Any] = logging.get_logger(__name__)
def a_ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : int=False ) -> List[str]:
    """Load a PyTorch checkpoint (single file or sharded) and convert it to a
    Flax state dict.

    NOTE(review): obfuscation duplicated the `__snake_case` parameters
    (SyntaxError) and collapsed locals, so `is_sharded`, `pt_path`,
    `pt_state_dict` and `flax_state_dict` below are unresolved. Restore from
    the unobfuscated original.
    """
    try:
        import torch # noqa: F401
    except ImportError:
        logger.error(
            '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise
    if not is_sharded:
        lowerCamelCase_ =os.path.abspath(__snake_case )
        logger.info(F'''Loading PyTorch weights from {pt_path}''' )
        # map_location='cpu' so conversion never requires a GPU
        lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' )
        logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
        lowerCamelCase_ =convert_pytorch_state_dict_to_flax(__snake_case , __snake_case )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        lowerCamelCase_ =convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case )
    return flax_state_dict
def a_ ( __snake_case : Tuple[str] , __snake_case : np.ndarray , __snake_case : Dict[str, jnp.ndarray] , __snake_case : str , ) -> (Tuple[str], np.ndarray):
    """Rename one PyTorch parameter key (and reshape its tensor) to Flax conventions.

    Covers layer-norm scale, batch-norm running stats, embeddings, conv kernels
    (HWIO transpose), linear kernels (transpose), legacy gamma/beta names, and
    the `weight_norm` parametrization keys.

    NOTE(review): duplicate `__snake_case` parameters — SyntaxError as written;
    originals are (pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix),
    and `key`, `renamed_pt_tuple_key`, `name` below are unresolved.
    """
    def is_key_or_prefix_key_in_dict(__snake_case : Tuple[str] ) -> bool:
        # True if the candidate key exists, with or without the model prefix.
        return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''scale''',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''mean''',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''var''',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''embedding''',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ):
        # PyTorch OIHW -> Flax HWIO
        lowerCamelCase_ =pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ):
        lowerCamelCase_ =pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    lowerCamelCase_ =None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        lowerCamelCase_ =pt_tuple_key[-2] + '''_g'''
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        lowerCamelCase_ =pt_tuple_key[-2] + '''_v'''
    if name is not None:
        lowerCamelCase_ =pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def a_ ( __snake_case : Union[str, Any] , __snake_case : str ) -> str:
    """Convert a flat PyTorch state dict into a nested Flax params dict.

    NOTE(review): obfuscation collapsed all locals to `lowerCamelCase_`, so
    `pt_state_dict`, `flax_model`, `random_flax_state_dict`, `flax_key`,
    `flax_tensor`, `flax_state_dict` etc. are unresolved. The logic mirrors
    the sharded variant below.
    """
    # convert pytorch tensor to numpy
    lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
    lowerCamelCase_ =flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        lowerCamelCase_ =flax_model.params['''params''']
    else:
        lowerCamelCase_ =flax_model.params
    lowerCamelCase_ =flatten_dict(__snake_case )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        lowerCamelCase_ =flatten_dict(flax_model.params['''batch_stats'''] )
        random_flax_state_dict.update(__snake_case )
    lowerCamelCase_ ={}
    # loading a full (with-head) PT checkpoint into a headless Flax base model
    lowerCamelCase_ =(model_prefix not in flax_model_params) and (
        model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
    )
    # loading a headless PT checkpoint into a with-head Flax model
    lowerCamelCase_ =(model_prefix in flax_model_params) and (
        model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        lowerCamelCase_ =tuple(pt_key.split('''.''' ) )
        # remove base model prefix if necessary
        lowerCamelCase_ =pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            lowerCamelCase_ =pt_tuple_key[1:]
        # Correctly rename weight parameters
        lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
            __snake_case , __snake_case , __snake_case , __snake_case )
        # add model prefix if necessary
        lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            lowerCamelCase_ =(model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                lowerCamelCase_ =jnp.asarray(__snake_case )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(__snake_case , __snake_case )
                continue
            # also add unexpected weight so that warning is thrown
            lowerCamelCase_ =jnp.asarray(__snake_case )
        else:
            # also add unexpected weight so that warning is thrown
            lowerCamelCase_ =jnp.asarray(__snake_case )
    return unflatten_dict(__snake_case )
def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[Any]:
    """Convert a *sharded* PyTorch checkpoint (list of .pt shard files) into a
    nested Flax params dict, one shard at a time.

    NOTE(review): same obfuscation damage as the non-sharded converter above —
    `shard_filenames`, `pt_state_dict`, `flax_state_dict` etc. are unresolved.
    """
    import torch
    # Load the index
    lowerCamelCase_ ={}
    for shard_file in shard_filenames:
        # load using msgpack utils
        lowerCamelCase_ =torch.load(__snake_case )
        lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
        lowerCamelCase_ =flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            lowerCamelCase_ =flax_model.params['''params''']
            lowerCamelCase_ =flatten_dict(__snake_case )
            random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
        else:
            lowerCamelCase_ =flax_model.params
            lowerCamelCase_ =flatten_dict(__snake_case )
        lowerCamelCase_ =(model_prefix not in flax_model_params) and (
            model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
        )
        lowerCamelCase_ =(model_prefix in flax_model_params) and (
            model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            lowerCamelCase_ =tuple(pt_key.split('''.''' ) )
            # remove base model prefix if necessary
            lowerCamelCase_ =pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                lowerCamelCase_ =pt_tuple_key[1:]
            # Correctly rename weight parameters
            lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
                __snake_case , __snake_case , __snake_case , __snake_case )
            # add model prefix if necessary
            lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                lowerCamelCase_ =(model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                        F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    lowerCamelCase_ =jnp.asarray(__snake_case )
                    continue
                if "var" in flax_key[-1]:
                    lowerCamelCase_ =jnp.asarray(__snake_case )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(__snake_case , __snake_case )
                    continue
                # also add unexpected weight so that warning is thrown
                lowerCamelCase_ =jnp.asarray(__snake_case )
            else:
                # also add unexpected weight so that warning is thrown
                lowerCamelCase_ =jnp.asarray(__snake_case )
    return unflatten_dict(__snake_case )
def a_ ( __snake_case : List[str] , __snake_case : Dict ) -> str:
    """Deserialize a Flax checkpoint from disk and load it into a PyTorch model.

    NOTE(review): collapsed locals — `flax_checkpoint_path`, the Flax class
    looked up via getattr, and the deserialized state are all unresolved;
    the getattr target is presumably the `transformers` module — confirm.
    """
    lowerCamelCase_ =os.path.abspath(__snake_case )
    logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )
    # import correct flax class
    lowerCamelCase_ =getattr(__snake_case , '''Flax''' + model.__class__.__name__ )
    # load flax weight dict
    with open(__snake_case , '''rb''' ) as state_f:
        try:
            lowerCamelCase_ =from_bytes(__snake_case , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
    return load_flax_weights_in_pytorch_model(__snake_case , __snake_case )
def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]:
    """Load a (flattened) Flax state dict into a PyTorch model, renaming keys
    from Flax to PyTorch conventions, casting bf16 to fp32, and warning about
    missing / unexpected keys.

    NOTE(review): duplicate `__snake_case` parameters — SyntaxError as written;
    originals are (flax_state, pt_model). Most names referenced below
    (`pt_model`, `flax_state_dict`, `flax_key`, `missing_keys`, ...) were
    destroyed by obfuscation; restore from the unobfuscated original.
    """
    try:
        import torch # noqa: F401
    except ImportError:
        logger.error(
            '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise
    # check if we have bf16 weights
    lowerCamelCase_ =flatten_dict(jax.tree_util.tree_map(lambda __snake_case : x.dtype == jnp.bfloataa , __snake_case ) ).values()
    if any(__snake_case ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''' )
        lowerCamelCase_ =jax.tree_util.tree_map(
            lambda __snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case )
    lowerCamelCase_ =flatten_dict(__snake_case )
    lowerCamelCase_ =pt_model.state_dict()
    # head checkpoint -> base model?
    lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
    )
    # base checkpoint -> head model?
    lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    lowerCamelCase_ =[]
    lowerCamelCase_ =set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix
        lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            lowerCamelCase_ =flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            lowerCamelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict:
            # conv layer
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
            lowerCamelCase_ =jnp.transpose(__snake_case , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict:
            # linear layer
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
            lowerCamelCase_ =flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_mean''',)
        elif "var" in flax_key_tuple[-1]:
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_var''',)
        if "batch_stats" in flax_state:
            lowerCamelCase_ ='''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
        else:
            lowerCamelCase_ ='''.'''.join(__snake_case )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        lowerCamelCase_ ={}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            lowerCamelCase_ =key.split('''.''' )
            lowerCamelCase_ =None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                lowerCamelCase_ =key_components[-2] + '''_g'''
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                lowerCamelCase_ =key_components[-2] + '''_v'''
            if name is not None:
                lowerCamelCase_ =key_components[:-3] + [name]
                lowerCamelCase_ ='''.'''.join(__snake_case )
                lowerCamelCase_ =key
        if flax_key in special_pt_names:
            lowerCamelCase_ =special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
                    F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
            else:
                # add weight to pytorch dict
                lowerCamelCase_ =np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor
                lowerCamelCase_ =torch.from_numpy(__snake_case )
                # remove from missing keys
                missing_keys.remove(__snake_case )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(__snake_case )
    pt_model.load_state_dict(__snake_case )
    # re-transform missing_keys to list
    lowerCamelCase_ =list(__snake_case )
    if len(__snake_case ) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
            F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).''' )
    else:
        logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
    if len(__snake_case ) > 0:
        logger.warning(
            F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
            F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
            ''' use it for predictions and inference.''' )
    else:
        logger.warning(
            F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
            '''If your task is similar to the task the model of the checkpoint was trained on, '''
            F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
    return pt_model
| 676 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
a_ : Union[str, Any] = logging.get_logger(__name__)
# Map of canonical checkpoint identifiers to their hosted config URLs.
a_ : Tuple = {
    """huggingface/informer-tourism-monthly""": (
        """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class __UpperCamelCase ( lowerCamelCase__ ):
    """Configuration for the Informer time-series transformer.

    NOTE(review): class/base names were obfuscated — this appears to be
    `InformerConfig(PretrainedConfig)`. All `lowerCamelCase_` assignments in
    __init__ originally bound the attribute named by the corresponding keyword
    argument; those bindings were destroyed by obfuscation.
    """

    # Canonical model-type string used by the auto classes.
    lowercase : Union[str, Any] ='informer'
    # Maps the generic config attribute names onto the Informer-specific ones.
    lowercase : Union[str, Any] ={
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }
    def __init__( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "student_t", lowerCAmelCase = "nll", lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = "mean", lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 64, lowerCAmelCase = 32, lowerCAmelCase = 32, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = True, lowerCAmelCase = "gelu", lowerCAmelCase = 0.0_5, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 100, lowerCAmelCase = 0.0_2, lowerCAmelCase=True, lowerCAmelCase = "prob", lowerCAmelCase = 5, lowerCAmelCase = True, **lowerCAmelCase, ):
        """Store all hyperparameters; validate that `cardinality` and
        `embedding_dimension` match `num_static_categorical_features`.

        NOTE(review): all parameters were collapsed to `lowerCAmelCase` by
        obfuscation (duplicate names — SyntaxError as written); the keyword
        names referenced in the body (`prediction_length`, `cardinality`, ...)
        are the original parameter names.
        """
        lowerCamelCase_ =prediction_length
        # context defaults to the prediction horizon when not given
        lowerCamelCase_ =context_length or prediction_length
        lowerCamelCase_ =distribution_output
        lowerCamelCase_ =loss
        lowerCamelCase_ =input_size
        lowerCamelCase_ =num_time_features
        lowerCamelCase_ =lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        lowerCamelCase_ =scaling
        lowerCamelCase_ =num_dynamic_real_features
        lowerCamelCase_ =num_static_real_features
        lowerCamelCase_ =num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(lowerCAmelCase ) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            lowerCamelCase_ =cardinality
        else:
            lowerCamelCase_ =[0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(lowerCAmelCase ) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            lowerCamelCase_ =embedding_dimension
        else:
            # heuristic default: roughly half the category count, capped at 50
            lowerCamelCase_ =[min(50, (cat + 1) // 2 ) for cat in self.cardinality]
        lowerCamelCase_ =num_parallel_samples
        # Transformer architecture configuration
        lowerCamelCase_ =input_size * len(self.lags_sequence ) + self._number_of_features
        lowerCamelCase_ =d_model
        lowerCamelCase_ =encoder_attention_heads
        lowerCamelCase_ =decoder_attention_heads
        lowerCamelCase_ =encoder_ffn_dim
        lowerCamelCase_ =decoder_ffn_dim
        lowerCamelCase_ =encoder_layers
        lowerCamelCase_ =decoder_layers
        lowerCamelCase_ =dropout
        lowerCamelCase_ =attention_dropout
        lowerCamelCase_ =activation_dropout
        lowerCamelCase_ =encoder_layerdrop
        lowerCamelCase_ =decoder_layerdrop
        lowerCamelCase_ =activation_function
        lowerCamelCase_ =init_std
        lowerCamelCase_ =use_cache
        # Informer
        lowerCamelCase_ =attention_type
        lowerCamelCase_ =sampling_factor
        lowerCamelCase_ =distil
        super().__init__(is_encoder_decoder=lowerCAmelCase, **lowerCAmelCase )
    @property
    def lowercase__ ( self ):
        """Number of extra per-time-step features concatenated to the lagged values."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
        )
| 676 |
'''simple docstring'''
def a_ ( first_str : str , second_str : str ) -> str:
    """Arrange the characters of two strings alternately.

    Characters are taken in turn from each string; once the shorter string is
    exhausted, the remainder of the longer string is appended unchanged.

    Fixed: the obfuscated original declared two parameters with the same name
    (a SyntaxError) and referenced undefined locals (`first_str_length`,
    `output_list`, ...).

    >>> a_("AB", "XYZ")
    'AXBYZ'
    """
    first_len = len(first_str )
    second_len = len(second_str )
    output_list = []
    # Walk up to the longer string's length; each index contributes at most
    # one character from each input, first string first.
    for char_count in range(max(first_len , second_len ) ):
        if char_count < first_len:
            output_list.append(first_str[char_count] )
        if char_count < second_len:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
    # NOTE(review): `alternative_string_arrange` is not defined in this module
    # (the function above was renamed `a_` by obfuscation) — this raises NameError.
    print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
| 676 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning at import time pointing users at the new import path.
deprecate(
    """stable diffusion controlnet""",
    """0.22.0""",
    """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
    standard_warn=False,
    stacklevel=3,
)
| 676 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Import structure for the lazy module: submodule name -> list of public names.
# NOTE(review): the mechanical renaming collapsed `_import_structure` (and its
# "modeling_timm_backbone" entry) into the throwaway name `a_`; the
# `_LazyModule(...)` call at the bottom still reads `_import_structure`, which
# is therefore undefined here — the original names need restoring.
a_ : Any = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is unavailable: simply omit the modeling symbols instead of
    # failing at import time.
    pass
else:
    a_ : Optional[Any] = ["""TimmBackbone"""]

if TYPE_CHECKING:
    # Static type checkers see the concrete imports...
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone
else:
    import sys

    # ...while at runtime the module is replaced by a lazy proxy that imports
    # each submodule on first attribute access.
    a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 676 | 1 |
'''simple docstring'''
from math import isqrt
def a_(number: int) -> bool:
    """Return True iff ``number`` is prime (trial division up to its square root).

    >>> a_(7)
    True
    >>> a_(9)
    False
    """
    if number < 2:
        # 0, 1 and negatives are not prime; without this guard the empty
        # divisor range below would make ``all`` report them as prime.
        return False
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))
def _is_prime(number: int) -> bool:
    """Trial-division primality test used by :func:`a_` below."""
    if number < 2:
        return False
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def a_(max_prime: int = 10**6) -> int:
    """Count the primes below ``max_prime`` that are a difference of two
    consecutive cubes, i.e. of the form ``3*k**2 + 3*k + 1``.

    >>> a_(100)
    4
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # first cube difference: 2**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += _is_prime(prime_candidate)
        cube_index += 1
        # (k+1)**3 - k**3 grows by 6*k from one candidate to the next.
        prime_candidate += 6 * cube_index
    return primes_count


# Backwards-compatible alias: the ``__main__`` banner below (and the original
# module) refer to this routine as ``solution``.
solution = a_

if __name__ == "__main__":
    print(F"""{solution() = }""")
| 676 |
'''simple docstring'''
import functools
def a_(word_a: str, word_b: str) -> int:
    """Return the Levenshtein (edit) distance between ``word_a`` and ``word_b``.

    Top-down dynamic programming over the pair of indices; ``functools.cache``
    memoises the recursion so each index pair is solved once.

    >>> a_("intention", "execution")
    5
    """
    len_word_a = len(word_a)
    len_word_b = len(word_b)

    @functools.cache
    def min_distance(index_a: int, index_b: int) -> int:
        # First word exhausted: insert the rest of the second word.
        if index_a >= len_word_a:
            return len_word_b - index_b
        # Second word exhausted: delete the rest of the first word.
        if index_b >= len_word_b:
            return len_word_a - index_a
        # Substitution cost: 1 if the current characters differ, else 0.
        diff = int(word_a[index_a] != word_b[index_b])
        return min(
            1 + min_distance(index_a + 1, index_b),  # delete from word_a
            1 + min_distance(index_a, index_b + 1),  # insert into word_a
            diff + min_distance(index_a + 1, index_b + 1),  # match/substitute
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 | 1 |
'''simple docstring'''
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def a_(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
    """Solve the conductivity relation
    ``conductivity = electron_conc * mobility * ELECTRON_CHARGE``
    for whichever quantity was supplied as 0.

    Exactly one of the three arguments must be 0; the missing quantity is
    computed and returned as a ``(name, value)`` pair.

    Raises:
        ValueError: if not exactly one argument is zero, or if any argument
            is negative.
    """
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''')
    elif conductivity < 0:
        raise ValueError('''Conductivity cannot be negative''')
    elif electron_conc < 0:
        raise ValueError('''Electron concentration cannot be negative''')
    elif mobility < 0:
        raise ValueError('''mobility cannot be negative''')
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 |
'''simple docstring'''
def a_(number: int) -> bool:
    """Return True iff ``number`` is automorphic: its square ends in the
    number itself (e.g. 5 -> 25, 76 -> 5776).

    Raises:
        TypeError: if ``number`` is not an ``int``.  (The original called
        ``isinstance(number, number)``, which itself raised ``TypeError``
        for every input.)
    """
    if not isinstance(number, int):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and its square one by one.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Pick a uniformly random element of ``lst`` to serve as the pivot."""
    return choice(lst)


def a_(lst: list, k: int) -> int:
    """Return the k-th smallest element (1-indexed) of ``lst`` using
    randomised quickselect.  Assumes the elements are distinct; expected
    linear time regardless of pivot luck.
    """
    pivot = random_pivot(lst)

    # Partition around the pivot in linear time.
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # If we get lucky, the pivot is exactly the k-th smallest element:
    # small (elements smaller than k) + pivot + big (elements larger).
    if len(small) == k - 1:
        return pivot
    # The answer is among the elements bigger than the pivot.
    elif len(small) < k - 1:
        return a_(big, k - len(small) - 1)
    # The answer is among the elements smaller than the pivot.
    else:
        return a_(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 |
'''simple docstring'''
from __future__ import annotations
# Type alias and fixtures: the rest of this module refers to these as
# ``Matrix``, ``initial_grid`` and ``no_solution``.
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` may be placed at ``(row, column)``: it must
    not already occur in that row, that column, or the enclosing 3x3 box.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    # ``row - row % 3`` / ``column - column % 3`` locate the box's top-left cell.
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the ``(row, column)`` of the first empty cell (value 0) in
    row-major order, or ``None`` when the grid is completely filled."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking.

    Returns the solved grid, or ``None`` when no assignment of digits 1-9
    satisfies the Sudoku constraints.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Backtrack: undo the tentative placement.
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    """Pretty-print ``grid``: one row per line, cells separated by spaces.

    The original printed the whole grid object for every cell; each cell is
    now printed individually.
    """
    for row in grid:
        for cell in row:
            print(cell, end=''' ''')
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
a_ : Union[str, Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 676 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
a_ : List[str] = TypeVar("""T""")
class __UpperCamelCase ( Generic[T] ):
def __init__( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =None
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =[any_type for _ in range(self.N )] + arr
lowerCamelCase_ =fnc
self.build()
def lowercase__ ( self ):
"""simple docstring"""
for p in range(self.N - 1, 0, -1 ):
lowerCamelCase_ =self.fn(self.st[p * 2], self.st[p * 2 + 1] )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
p += self.N
lowerCamelCase_ =v
while p > 1:
lowerCamelCase_ =p // 2
lowerCamelCase_ =self.fn(self.st[p * 2], self.st[p * 2 + 1] )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): # noqa: E741
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =l + self.N, r + self.N
lowerCamelCase_ =None
while l <= r:
if l % 2 == 1:
lowerCamelCase_ =self.st[l] if res is None else self.fn(lowerCAmelCase, self.st[l] )
if r % 2 == 0:
lowerCamelCase_ =self.st[r] if res is None else self.fn(lowerCAmelCase, self.st[r] )
lowerCamelCase_, lowerCamelCase_ =(l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
a_ : int = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
a_ : Any = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
a_ : int = SegmentTree(test_array, min)
a_ : Dict = SegmentTree(test_array, max)
a_ : Optional[Any] = SegmentTree(test_array, lambda a, b: a + b)
def a_ ( ) -> None:
"""simple docstring"""
for i in range(len(__snake_case ) ):
for j in range(__snake_case , len(__snake_case ) ):
lowerCamelCase_ =reduce(__snake_case , test_array[i : j + 1] )
lowerCamelCase_ =reduce(__snake_case , test_array[i : j + 1] )
lowerCamelCase_ =reduce(lambda __snake_case , __snake_case : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(__snake_case , __snake_case )
assert max_range == max_segment_tree.query(__snake_case , __snake_case )
assert sum_range == sum_segment_tree.query(__snake_case , __snake_case )
test_all_segments()
for index, value in test_updates.items():
a_ : Optional[Any] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 676 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : Tuple = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    """Configuration class for the Informer time-series forecasting model.

    Stores the distributional, featurisation and transformer-architecture
    hyper-parameters.  ``context_length`` defaults to ``prediction_length``
    when not supplied, and ``lags_sequence`` defaults to ``[1..7]``.
    """

    model_type = 'informer'
    # Map the generic transformers attribute names onto Informer's fields.
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer-specific: ProbSparse attention and distillation settings.
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra input features fed alongside the lagged values."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 676 | 1 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
a_ : List[str] = """pt"""
elif is_tf_available():
a_ : Dict = """tf"""
else:
a_ : Any = """jax"""
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
    # NOTE(review): a mechanical renaming pass collapsed distinct parameter
    # names into repeated `lowerCAmelCase` identifiers, so several method
    # signatures below do not compile as written, and many local variables
    # are bound to `lowerCamelCase_` but read under their original names.
    # The original identifiers need restoring before this suite can run.
    lowercase : str =PerceiverTokenizer  # tokenizer class under test
    lowercase : List[Any] =False         # no fast (Rust) tokenizer available

    def lowercase__ ( self ):
        """Create a fresh PerceiverTokenizer and save it to the temp dir."""
        super().setUp()
        lowerCamelCase_ =PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def lowercase__ ( self ):
        """Reference pretrained tokenizer (deepmind/language-perceiver)."""
        return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )

    def lowercase__ ( self, **lowerCAmelCase ):
        """Reload the tokenizer saved by setUp, forwarding extra kwargs."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False, lowerCAmelCase=20, lowerCAmelCase=5 ):
        """Build a (text, ids) pair of cleanly round-tripping tokens, clipped
        to ``max_length`` and repeated up to ``min_length``."""
        # Collect every id whose single-token decode round-trips.
        lowerCamelCase_ =[]
        for i in range(len(lowerCAmelCase ) ):
            try:
                lowerCamelCase_ =tokenizer.decode([i], clean_up_tokenization_spaces=lowerCAmelCase )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        lowerCamelCase_ =list(filter(lambda lowerCAmelCase : re.match(R'''^[ a-zA-Z]+$''', t[1] ), lowerCAmelCase ) )
        lowerCamelCase_ =list(filter(lambda lowerCAmelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCAmelCase ), lowerCAmelCase ) )
        if max_length is not None and len(lowerCAmelCase ) > max_length:
            lowerCamelCase_ =toks[:max_length]
        if min_length is not None and len(lowerCAmelCase ) < min_length and len(lowerCAmelCase ) > 0:
            while len(lowerCAmelCase ) < min_length:
                lowerCamelCase_ =toks + toks
        # toks_str = [t[1] for t in toks]
        lowerCamelCase_ =[t[0] for t in toks]

        # Ensure consistency
        lowerCamelCase_ =tokenizer.decode(lowerCAmelCase, clean_up_tokenization_spaces=lowerCAmelCase )
        if " " not in output_txt and len(lowerCAmelCase ) > 1:
            lowerCamelCase_ =(
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCAmelCase )
                + ''' '''
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCAmelCase )
            )
        if with_prefix_space:
            lowerCamelCase_ =''' ''' + output_txt
        lowerCamelCase_ =tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
        return output_txt, output_ids

    def lowercase__ ( self ):
        """Encode/decode multibyte (non-ASCII) characters round-trip."""
        lowerCamelCase_ =self.perceiver_tokenizer
        lowerCamelCase_ ='''Unicode €.'''
        lowerCamelCase_ =tokenizer(lowerCAmelCase )
        lowerCamelCase_ =[4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['''input_ids'''], lowerCAmelCase )

        # decoding
        lowerCamelCase_ =tokenizer.decode(lowerCAmelCase )
        self.assertEqual(lowerCAmelCase, '''[CLS]Unicode €.[SEP]''' )

        lowerCamelCase_ =tokenizer('''e è é ê ë''' )
        lowerCamelCase_ =[4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['''input_ids'''], lowerCAmelCase )
        # decoding
        lowerCamelCase_ =tokenizer.decode(lowerCAmelCase )
        self.assertEqual(lowerCAmelCase, '''[CLS]e è é ê ë[SEP]''' )

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ), '''[CLS]e è é ê ë[SEP]''' )

    def lowercase__ ( self ):
        """Batched encoding pads to a rectangular tensor with the expected ids."""
        lowerCamelCase_ =self.perceiver_tokenizer
        lowerCamelCase_ =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        lowerCamelCase_ =[4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        lowerCamelCase_ =tokenizer(lowerCAmelCase, padding=lowerCAmelCase, return_tensors=lowerCAmelCase )
        self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )

        if FRAMEWORK != "jax":
            lowerCamelCase_ =list(batch.input_ids.numpy()[0] )
        else:
            lowerCamelCase_ =list(batch.input_ids.tolist()[0] )
        self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
        self.assertEqual((2, 38), batch.input_ids.shape )
        self.assertEqual((2, 38), batch.attention_mask.shape )

    def lowercase__ ( self ):
        """Plain (non-target) encoding yields only encoder-side tensors."""
        lowerCamelCase_ =self.perceiver_tokenizer
        lowerCamelCase_ =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        lowerCamelCase_ =tokenizer(lowerCAmelCase, padding=lowerCAmelCase, return_tensors=lowerCAmelCase )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''', lowerCAmelCase )
        self.assertIn('''attention_mask''', lowerCAmelCase )
        self.assertNotIn('''decoder_input_ids''', lowerCAmelCase )
        self.assertNotIn('''decoder_attention_mask''', lowerCAmelCase )

    def lowercase__ ( self ):
        """Target texts are padded/truncated to the requested max_length."""
        lowerCamelCase_ =self.perceiver_tokenizer
        lowerCamelCase_ =[
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        lowerCamelCase_ =tokenizer(
            text_target=lowerCAmelCase, max_length=32, padding='''max_length''', truncation=lowerCAmelCase, return_tensors=lowerCAmelCase )
        self.assertEqual(32, targets['''input_ids'''].shape[1] )

    def lowercase__ ( self ):
        """Saving and reloading preserves encodings, added tokens and
        model_max_length overrides."""
        lowerCamelCase_ =self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length, 42 )

        # Now let's start the test
        lowerCamelCase_ =self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowerCamelCase_ =tempfile.mkdtemp()
                lowerCamelCase_ =''' He is very happy, UNwant\u00E9d,running'''
                lowerCamelCase_ =tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
                tokenizer.save_pretrained(lowerCAmelCase )

                lowerCamelCase_ =tokenizer.__class__.from_pretrained(lowerCAmelCase )
                lowerCamelCase_ =after_tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
                self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
                shutil.rmtree(lowerCAmelCase )

        lowerCamelCase_ =self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowerCamelCase_ =tempfile.mkdtemp()
                lowerCamelCase_ =''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''] )
                lowerCamelCase_ =tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''' )
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
                lowerCamelCase_ =tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
                tokenizer.save_pretrained(lowerCAmelCase )

                lowerCamelCase_ =tokenizer.__class__.from_pretrained(lowerCAmelCase )
                lowerCamelCase_ =after_tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
                self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
                self.assertIn('''new_additional_special_token''', after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length, 42 )

                lowerCamelCase_ =tokenizer.__class__.from_pretrained(lowerCAmelCase, model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length, 43 )
                shutil.rmtree(lowerCAmelCase )

    def lowercase__ ( self ):
        """additional_special_tokens can be edited in the saved JSON files and
        overridden via from_pretrained."""
        lowerCamelCase_ =[]
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(lowerCAmelCase )

                with open(os.path.join(lowerCAmelCase, '''special_tokens_map.json''' ), encoding='''utf-8''' ) as json_file:
                    lowerCamelCase_ =json.load(lowerCAmelCase )

                with open(os.path.join(lowerCAmelCase, '''tokenizer_config.json''' ), encoding='''utf-8''' ) as json_file:
                    lowerCamelCase_ =json.load(lowerCAmelCase )

                lowerCamelCase_ =[f'''<extra_id_{i}>''' for i in range(125 )]

                lowerCamelCase_ =added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                lowerCamelCase_ =added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]

                with open(os.path.join(lowerCAmelCase, '''special_tokens_map.json''' ), '''w''', encoding='''utf-8''' ) as outfile:
                    json.dump(lowerCAmelCase, lowerCAmelCase )
                with open(os.path.join(lowerCAmelCase, '''tokenizer_config.json''' ), '''w''', encoding='''utf-8''' ) as outfile:
                    json.dump(lowerCAmelCase, lowerCAmelCase )

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                lowerCamelCase_ =tokenizer_class.from_pretrained(
                    lowerCAmelCase, )
                self.assertIn(
                    '''an_additional_special_token''', tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ['''an_additional_special_token'''], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ), )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                lowerCamelCase_ =added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''', lstrip=lowerCAmelCase )]
                lowerCamelCase_ =tokenizer_class.from_pretrained(
                    lowerCAmelCase, additional_special_tokens=lowerCAmelCase, )
                self.assertIn('''a_new_additional_special_token''', tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['''a_new_additional_special_token'''], tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ), )

    def lowercase__ ( self ):
        """Decoding an id that is not valid UTF-8 yields the replacement char."""
        lowerCamelCase_ =self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178] ), '''�''' )

    def lowercase__ ( self ):
        """Not applicable to a byte-level tokenizer (no vocab file)."""
        pass

    def lowercase__ ( self ):
        """Not applicable to a byte-level tokenizer (no pretrained vocab)."""
        pass

    def lowercase__ ( self ):
        """Not applicable: no subword regularisation for byte tokenization."""
        pass

    def lowercase__ ( self ):
        """Not applicable: pickling a byte tokenizer needs no special care."""
        pass

    def lowercase__ ( self ):
        """convert_tokens_to_string joins tokens back into a plain string."""
        lowerCamelCase_ =self.get_tokenizers(fast=lowerCAmelCase, do_lower_case=lowerCAmelCase )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                lowerCamelCase_ =['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
                lowerCamelCase_ =tokenizer.convert_tokens_to_string(lowerCAmelCase )
                self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
| 676 |
'''simple docstring'''
from __future__ import annotations
def a_(limit: int) -> list[int]:
    """Return all primes strictly below ``limit`` (sieve of Eratosthenes,
    odd numbers only).

    >>> a_(10)
    [2, 3, 5, 7]
    """
    if limit < 3:
        # Degenerate limits would index past the sieve below; no primes < 2.
        return []
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # Strike out multiples of every odd candidate up to sqrt(limit); evens
    # other than 2 are skipped by the collection loop below anyway.
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


# Descriptive alias: the rest of the module calls this ``prime_sieve``.
prime_sieve = a_
def prime_sieve(limit: int) -> list[int]:
    """All primes strictly below ``limit`` (self-contained sieve so this
    solution does not rely on sibling definitions)."""
    if limit < 3:
        return []
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(limit**0.5) + 1):
        if is_prime[i]:
            for multiple in range(i * i, limit, i):
                is_prime[multiple] = False
    return [n for n, prime in enumerate(is_prime) if prime]


def a_(ceiling: int = 100_0000) -> int:
    """Project Euler 50: the prime below ``ceiling`` expressible as the sum
    of the longest run of consecutive primes.

    >>> a_(100)
    41
    """
    primes = prime_sieve(ceiling)
    prime_set = set(primes)  # O(1) membership tests in the loop below
    length = 0
    largest = 0
    for i in range(len(primes)):
        # Start at the current best length: shorter runs cannot improve.
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest


# ``solution`` is the name the ``__main__`` banner below expects.
solution = a_

if __name__ == "__main__":
    print(F"""{solution() = }""")
| 676 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; the config classes below read it as ``logger`` (the
# renaming pass had bound it to the throwaway name ``a_``).
logger = logging.get_logger(__name__)
a_ : Optional[int] = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the text tower of AltCLIP: an XLM-R style encoder
    with an additional projection of size ``project_dim``."""

    model_type = 'altclip_text_model'

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the CLIP-style vision tower of AltCLIP."""

    model_type = 'altclip_vision_model'

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load a vision config, unwrapping the ``vision_config`` sub-dict
        when the checkpoint holds a composite AltCLIP config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('''model_type''') == "altclip":
            config_dict = config_dict['''vision_config''']

        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    """Composite configuration holding an ``AltCLIPTextConfig`` and an
    ``AltCLIPVisionConfig`` plus the joint projection hyper-parameters."""

    model_type = 'altclip'
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # Legacy `text_config_dict`/`vision_config_dict` kwargs are popped
        # first and merged into `text_config`/`vision_config` below.
        text_config_dict = kwargs.pop('''text_config_dict''', None)
        vision_config_dict = kwargs.pop('''vision_config_dict''', None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict['''id2label'''].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''')

        if vision_config is None:
            vision_config = {}
            logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''')

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: "AltCLIPTextConfig", vision_config: "AltCLIPVisionConfig", **kwargs):
        """Build an ``AltCLIPConfig`` from the two sub-configurations."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs and
        recording this class's ``model_type``."""
        output = copy.deepcopy(self.__dict__)
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 676 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase(DiffusionPipeline):
    """Unconditional DDIM image-generation pipeline.

    NOTE(review): restored from mangled code — both signatures repeated the
    parameter name `lowerCAmelCase` (a SyntaxError) and every local was
    collapsed to `lowerCamelCase_`, so names used below (`image`,
    `batch_size`, ...) were never bound. Conventional DDIM pipeline names
    restored; the base class is taken from the `DiffusionPipeline` import at
    the top of this file (the mangled base name was unresolved).
    """

    def __init__(self, unet, scheduler):
        """Register the denoising `unet` and a DDIM-compatible `scheduler`."""
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        """Run the full denoising loop and return the generated images.

        Args:
            batch_size: number of images to generate.
            generator: torch generator (or list, one per batch element) for
                reproducible noise.
            eta: DDIM eta; 0.0 is deterministic DDIM, 1.0 approaches DDPM.
            num_inference_steps: number of scheduler denoising steps.
            use_clipped_model_output: forwarded to ``scheduler.step``.
            output_type: "pil" converts to PIL images, else numpy is returned.
            return_dict: if False, return a plain ``(images,)`` tuple.

        Raises:
            ValueError: if a list of generators does not match ``batch_size``.
        """
        # Shape of the initial gaussian noise; sample_size may be an int
        # (square images) or a (height, width) sequence.
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        # Map from [-1, 1] to [0, 1] and move to channels-last numpy.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 676 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Optional[Any] = logging.get_logger(__name__)

# NOTE(review): both module-level names below were mangled to `a_`, so this
# pretrained-config map clobbers the logger created on the previous line —
# confirm the intended distinct names (conventionally `logger` and
# `YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP`).
a_ : List[Any] = {
    """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __UpperCamelCase(PretrainedConfig):
    """Configuration for the YOLOS object-detection model.

    NOTE(review): restored from mangled code. The original ``__init__``
    repeated the parameter name `lowerCAmelCase` 22 times (a SyntaxError) and
    assigned every value to a bare local instead of ``self.*``, so the config
    stored nothing. Parameter names are recovered from the default values
    (they match the canonical YolosConfig). The ``model_type`` class attribute
    was mangled to `lowercase`, breaking the PretrainedConfig contract. The
    base class is taken from the `PretrainedConfig` import above.
    NOTE(review): the ONNX-config class defined right after this one reuses the
    same mangled class name and shadows this class at module level — confirm
    the intended distinct names.
    """

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],  # kept as in the reference implementation
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        """Store all model hyper-parameters; extra kwargs go to the base class."""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class __UpperCamelCase(OnnxConfig):
    """ONNX export configuration for YOLOS.

    NOTE(review): restored from mangled code in which all three properties
    were named `lowercase__`, so two of them were shadowed and unreachable.
    The canonical ``OnnxConfig`` property names (`inputs`,
    `atol_for_validation`, `default_onnx_opset`) are what the export machinery
    looks up, and `torch_onnx_minimum_version` is restored from the mangled
    `lowercase` attribute. The base class is taken from the `OnnxConfig`
    import above.
    NOTE(review): this class reuses the mangled name of the config class
    defined just above and shadows it — confirm the intended distinct names.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        """Input tensor spec: pixel_values with dynamic batch and spatial axes."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4

    @property
    def default_onnx_opset(self):
        """Default ONNX opset version used for export."""
        return 12
| 676 |
'''simple docstring'''
from maths.prime_check import is_prime
def a_(number: int) -> int:
    """Return ``number + 2`` if ``number`` and ``number + 2`` are twin primes, else -1.

    NOTE(review): restored from mangled code that called
    ``isinstance(__snake_case, __snake_case)`` — which raises TypeError at
    runtime because the second argument is not a type — and then referenced an
    unbound name ``number``. The parameter is renamed to match the body (the
    original was uncallable either way).

    Args:
        number: candidate lower twin prime.

    Returns:
        ``number + 2`` when both ``number`` and ``number + 2`` are prime,
        otherwise ``-1``.

    Raises:
        TypeError: if ``number`` is not an integer.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.