code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : dict ) ->List[Any]:
'''simple docstring'''
a : List[Any] = BeautifulSoup(requests.get(lowerCAmelCase_ , params=lowerCAmelCase_ ).content , "html.parser" )
a : List[Any] = soup.find("div" , attrs={"class": "gs_ri"} )
a : Optional[Any] = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
a : List[str] = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 633 | from ...configuration_utils import PretrainedConfig
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "bert-generation"
def __init__( self : Optional[int] , A__ : List[Any]=5_03_58 , A__ : Any=10_24 , A__ : Any=24 , A__ : List[Any]=16 , A__ : List[Any]=40_96 , A__ : int="gelu" , A__ : List[str]=0.1 , A__ : List[str]=0.1 , A__ : str=5_12 , A__ : int=0.02 , A__ : Any=1E-12 , A__ : Optional[Any]=0 , A__ : List[str]=2 , A__ : Optional[int]=1 , A__ : str="absolute" , A__ : Any=True , **A__ : Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
snake_case_ : str = vocab_size
snake_case_ : int = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Tuple = intermediate_size
snake_case_ : str = hidden_dropout_prob
snake_case_ : Optional[Any] = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Optional[int] = layer_norm_eps
snake_case_ : str = position_embedding_type
snake_case_ : Dict = use_cache
| 666 | 0 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase__ : List[Any] =False
lowerCAmelCase__ : str =logging.get_logger(__name__)
lowerCAmelCase__ : List[str] ='ybelkada/fonts'
def a__ ( ):
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
'Pix2StructImageProcessor. Please upgrade torch.' )
def a__ ( A__, A__, A__ ):
requires_backends(lowerCAmelCase_, ['torch'] )
_check_torch_version()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_tensor.unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.nn.functional.unfold(lowerCAmelCase_, (patch_height, patch_width), stride=(patch_height, patch_width) )
SCREAMING_SNAKE_CASE_ : Any = patches.reshape(image_tensor.size(0 ), image_tensor.size(1 ), lowerCAmelCase_, lowerCAmelCase_, -1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = patches.permute(0, 4, 2, 3, 1 ).reshape(
image_tensor.size(2 ) // patch_height, image_tensor.size(3 ) // patch_width, image_tensor.size(1 ) * patch_height * patch_width, )
return patches.unsqueeze(0 )
def a__ ( A__, A__ = 3_6, A__ = "black", A__ = "white", A__ = 5, A__ = 5, A__ = 5, A__ = 5, A__ = None, A__ = None, ):
requires_backends(lowerCAmelCase_, 'vision' )
# Add new lines so that each line is no more than 80 characters.
SCREAMING_SNAKE_CASE_ : Optional[Any] = textwrap.TextWrapper(width=8_0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = wrapper.wrap(text=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "\n".join(lowerCAmelCase_ )
if font_bytes is not None and font_path is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = io.BytesIO(lowerCAmelCase_ )
elif font_path is not None:
SCREAMING_SNAKE_CASE_ : str = font_path
else:
SCREAMING_SNAKE_CASE_ : Tuple = hf_hub_download(lowerCAmelCase_, 'Arial.TTF' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ImageFont.truetype(lowerCAmelCase_, encoding='UTF-8', size=lowerCAmelCase_ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
SCREAMING_SNAKE_CASE_ : Dict = ImageDraw.Draw(Image.new('RGB', (1, 1), lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE_ : int = temp_draw.textbbox((0, 0), lowerCAmelCase_, lowerCAmelCase_ )
# Create the actual image with a bit of padding around the text.
SCREAMING_SNAKE_CASE_ : Dict = text_width + left_padding + right_padding
SCREAMING_SNAKE_CASE_ : Optional[int] = text_height + top_padding + bottom_padding
SCREAMING_SNAKE_CASE_ : str = Image.new('RGB', (image_width, image_height), lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : int = ImageDraw.Draw(lowerCAmelCase_ )
draw.text(xy=(left_padding, top_padding), text=lowerCAmelCase_, fill=lowerCAmelCase_, font=lowerCAmelCase_ )
return image
def a__ ( A__, A__, **A__ ):
requires_backends(lowerCAmelCase_, 'vision' )
# Convert to PIL image if necessary
SCREAMING_SNAKE_CASE_ : List[str] = to_pil_image(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = render_text(lowerCAmelCase_, **lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max(header_image.width, image.width )
SCREAMING_SNAKE_CASE_ : Optional[int] = int(image.height * (new_width / image.width) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = int(header_image.height * (new_width / header_image.width) )
SCREAMING_SNAKE_CASE_ : Any = Image.new('RGB', (new_width, new_height + new_header_height), 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ), (0, 0) )
new_image.paste(image.resize((new_width, new_height) ), (0, new_header_height) )
# Convert back to the original framework if necessary
SCREAMING_SNAKE_CASE_ : str = to_numpy_array(lowerCAmelCase_ )
if infer_channel_dimension_format(lowerCAmelCase_ ) == ChannelDimension.LAST:
SCREAMING_SNAKE_CASE_ : Tuple = to_channel_dimension_format(lowerCAmelCase_, ChannelDimension.LAST )
return new_image
class __lowercase (_UpperCamelCase ):
"""simple docstring"""
_UpperCAmelCase = ["flattened_patches"]
def __init__( self , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 2_0_4_8 , lowerCAmelCase__ = False , **lowerCAmelCase__ , ):
"""simple docstring"""
super().__init__(**A__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE_ : List[Any] = do_convert_rgb
SCREAMING_SNAKE_CASE_ : List[str] = max_patches
SCREAMING_SNAKE_CASE_ : Tuple = is_vqa
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
requires_backends(self.extract_flattened_patches , 'torch' )
_check_torch_version()
# convert to torch
SCREAMING_SNAKE_CASE_ : Optional[int] = to_channel_dimension_format(A__ , ChannelDimension.FIRST )
SCREAMING_SNAKE_CASE_ : Tuple = torch.from_numpy(A__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = patch_size["height"], patch_size["width"]
SCREAMING_SNAKE_CASE_ : Tuple = get_image_size(A__ )
# maximize scale s.t.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
SCREAMING_SNAKE_CASE_ : str = max(min(math.floor(scale * image_height / patch_height ) , A__ ) , 1 )
SCREAMING_SNAKE_CASE_ : List[str] = max(min(math.floor(scale * image_width / patch_width ) , A__ ) , 1 )
SCREAMING_SNAKE_CASE_ : str = max(num_feasible_rows * patch_height , 1 )
SCREAMING_SNAKE_CASE_ : Any = max(num_feasible_cols * patch_width , 1 )
SCREAMING_SNAKE_CASE_ : Dict = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=A__ , antialias=A__ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
SCREAMING_SNAKE_CASE_ : str = torch_extract_patches(A__ , A__ , A__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = patches.shape
SCREAMING_SNAKE_CASE_ : Tuple = patches_shape[1]
SCREAMING_SNAKE_CASE_ : Dict = patches_shape[2]
SCREAMING_SNAKE_CASE_ : Any = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
SCREAMING_SNAKE_CASE_ : str = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.arange(A__ ).reshape([rows, 1] ).repeat(1 , A__ ).reshape([rows * columns, 1] )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.arange(A__ ).reshape([1, columns] ).repeat(A__ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = row_ids.to(torch.floataa )
SCREAMING_SNAKE_CASE_ : str = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
SCREAMING_SNAKE_CASE_ : str = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
SCREAMING_SNAKE_CASE_ : Tuple = torch.nn.functional.pad(A__ , [0, 0, 0, max_patches - (rows * columns)] ).float()
SCREAMING_SNAKE_CASE_ : List[str] = to_numpy_array(A__ )
return result
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ ):
"""simple docstring"""
if image.dtype == np.uinta:
SCREAMING_SNAKE_CASE_ : str = image.astype(np.floataa )
# take mean across the whole `image`
SCREAMING_SNAKE_CASE_ : List[str] = np.mean(A__ )
SCREAMING_SNAKE_CASE_ : List[Any] = np.std(A__ )
SCREAMING_SNAKE_CASE_ : Tuple = max(A__ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(A__ , mean=A__ , std=A__ , **A__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = ChannelDimension.FIRST , **lowerCAmelCase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE_ : List[Any] = patch_size if patch_size is not None else self.patch_size
SCREAMING_SNAKE_CASE_ : str = max_patches if max_patches is not None else self.max_patches
SCREAMING_SNAKE_CASE_ : str = self.is_vqa
if kwargs.get('data_format' , A__ ) is not None:
raise ValueError('data_format is not an accepted input as the outputs are ' )
SCREAMING_SNAKE_CASE_ : Dict = make_list_of_images(A__ )
if not valid_images(A__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE_ : int = [convert_to_rgb(A__ ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : Tuple = [to_numpy_array(A__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.' )
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('font_bytes' , A__ )
SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('font_path' , A__ )
if isinstance(A__ , A__ ):
SCREAMING_SNAKE_CASE_ : List[str] = [header_text] * len(A__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
render_header(A__ , header_text[i] , font_bytes=A__ , font_path=A__ )
for i, image in enumerate(A__ )
]
if do_normalize:
SCREAMING_SNAKE_CASE_ : Any = [self.normalize(image=A__ ) for image in images]
# convert to torch tensor and permute
SCREAMING_SNAKE_CASE_ : List[str] = [
self.extract_flattened_patches(image=A__ , max_patches=A__ , patch_size=A__ )
for image in images
]
# create attention mask in numpy
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=A__ )
return encoded_outputs
| 101 | import math
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: int ):
snake_case_ : Any = []
snake_case_ : List[str] = 2
snake_case_ : Optional[int] = int(math.sqrt(lowerCAmelCase_ ) ) # Size of every segment
snake_case_ : str = [True] * (end + 1)
snake_case_ : Any = []
while start <= end:
if temp[start] is True:
in_prime.append(lowerCAmelCase_ )
for i in range(start * start , end + 1 , lowerCAmelCase_ ):
snake_case_ : Union[str, Any] = False
start += 1
prime += in_prime
snake_case_ : Dict = end + 1
snake_case_ : Dict = min(2 * end , lowerCAmelCase_ )
while low <= n:
snake_case_ : Any = [True] * (high - low + 1)
for each in in_prime:
snake_case_ : Optional[Any] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCAmelCase_ , high + 1 , lowerCAmelCase_ ):
snake_case_ : List[Any] = False
for j in range(len(lowerCAmelCase_ ) ):
if temp[j] is True:
prime.append(j + low )
snake_case_ : int = high + 1
snake_case_ : Union[str, Any] = min(high + end , lowerCAmelCase_ )
return prime
print(sieve(1_0**6))
| 666 | 0 |
'''simple docstring'''
def snake_case ( a_ : str ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Dict = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
UpperCamelCase_ : Optional[Any] = hex_num[0] == "-"
if is_negative:
UpperCamelCase_ : List[str] = hex_num[1:]
try:
UpperCamelCase_ : List[Any] = int(lowerCAmelCase_ , 16 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
UpperCamelCase_ : Optional[Any] = ""
while int_num > 0:
UpperCamelCase_ : List[Any] = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 208 | import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self : List[str] , A__ : List[Any] , A__ : int=7 , A__ : Union[str, Any]=3 , A__ : List[str]=30 , A__ : Optional[int]=4_00 , A__ : Optional[Any]=True , A__ : Optional[int]=None , A__ : Optional[Any]=True , A__ : Any=[0.5, 0.5, 0.5] , A__ : int=[0.5, 0.5, 0.5] , A__ : Any=True , A__ : int=1 / 2_55 , A__ : List[str]=True , ) -> Dict:
'''simple docstring'''
snake_case_ : int = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
snake_case_ : Any = parent
snake_case_ : Optional[int] = batch_size
snake_case_ : List[Any] = num_channels
snake_case_ : Union[str, Any] = min_resolution
snake_case_ : List[Any] = max_resolution
snake_case_ : Tuple = do_resize
snake_case_ : Dict = size
snake_case_ : Optional[Any] = do_normalize
snake_case_ : int = image_mean
snake_case_ : List[Any] = image_std
snake_case_ : Tuple = do_rescale
snake_case_ : Any = rescale_factor
snake_case_ : Optional[int] = do_pad
def UpperCAmelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase__ ( self : Optional[int] , A__ : Optional[int] , A__ : Any=False ) -> Optional[Any]:
'''simple docstring'''
if not batched:
snake_case_ : Any = image_inputs[0]
if isinstance(A__ , Image.Image ):
snake_case_ ,snake_case_ : Dict = image.size
else:
snake_case_ ,snake_case_ : int = image.shape[1], image.shape[2]
if w < h:
snake_case_ : Dict = int(self.size["shortest_edge"] * h / w )
snake_case_ : Optional[int] = self.size["shortest_edge"]
elif w > h:
snake_case_ : Optional[int] = self.size["shortest_edge"]
snake_case_ : str = int(self.size["shortest_edge"] * w / h )
else:
snake_case_ : Optional[int] = self.size["shortest_edge"]
snake_case_ : List[Any] = self.size["shortest_edge"]
else:
snake_case_ : str = []
for image in image_inputs:
snake_case_ ,snake_case_ : Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case_ : List[Any] = max(A__ , key=lambda A__ : item[0] )[0]
snake_case_ : int = max(A__ , key=lambda A__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( _UpperCamelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = ConditionalDetrImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , "image_mean" ) )
self.assertTrue(hasattr(A__ , "image_std" ) )
self.assertTrue(hasattr(A__ , "do_normalize" ) )
self.assertTrue(hasattr(A__ , "do_resize" ) )
self.assertTrue(hasattr(A__ , "size" ) )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , A__ )
snake_case_ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , A__ )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
snake_case_ : int = image_processing(A__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self : int ) -> Any:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
snake_case_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : List[str] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Optional[int] = image_processing(A__ , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : Dict = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Any = image_processing(A__ , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : int = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case_ : Optional[Any] = json.loads(f.read() )
snake_case_ : int = {"image_id": 3_97_69, "annotations": target}
# encode them
snake_case_ : Optional[int] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
snake_case_ : Any = image_processing(images=A__ , annotations=A__ , return_tensors="pt" )
# verify pixel values
snake_case_ : List[Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , A__ )
snake_case_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
# verify area
snake_case_ : Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
# verify boxes
snake_case_ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
snake_case_ : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
# verify image_id
snake_case_ : List[Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
# verify is_crowd
snake_case_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
# verify class_labels
snake_case_ : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
# verify orig_size
snake_case_ : Any = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
# verify size
snake_case_ : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )
@slow
def UpperCAmelCase__ ( self : int ) -> str:
'''simple docstring'''
snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case_ : Any = json.loads(f.read() )
snake_case_ : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
snake_case_ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case_ : Union[str, Any] = ConditionalDetrImageProcessor(format="coco_panoptic" )
snake_case_ : str = image_processing(images=A__ , annotations=A__ , masks_path=A__ , return_tensors="pt" )
# verify pixel values
snake_case_ : int = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , A__ )
snake_case_ : str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
# verify area
snake_case_ : Optional[int] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
# verify boxes
snake_case_ : str = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
snake_case_ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
# verify image_id
snake_case_ : List[str] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
# verify is_crowd
snake_case_ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
# verify class_labels
snake_case_ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
# verify masks
snake_case_ : Union[str, Any] = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A__ )
# verify orig_size
snake_case_ : Dict = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
# verify size
snake_case_ : str = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )
| 666 | 0 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
set_seed(7_70)
lowerCamelCase = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
lowerCamelCase = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
lowerCamelCase = os.path.dirname(os.path.abspath(__file__))
lowerCamelCase = os.path.join(os.path.expanduser('~'), '.cache')
lowerCamelCase = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def a_ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=False ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] =model_type
if use_small:
key += "_small"
return os.path.join(lowerCAmelCase_ , REMOTE_MODEL_PATHS[key]['file_name'] )
def a_ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
hf_hub_download(repo_id=lowerCAmelCase_ , filename=lowerCAmelCase_ , local_dir=lowerCAmelCase_ )
def a_ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : Dict="text" ):
'''simple docstring'''
if model_type == "text":
_lowerCamelCase : int =BarkSemanticModel
_lowerCamelCase : str =BarkSemanticConfig
_lowerCamelCase : Optional[Any] =BarkSemanticGenerationConfig
elif model_type == "coarse":
_lowerCamelCase : str =BarkCoarseModel
_lowerCamelCase : Optional[int] =BarkCoarseConfig
_lowerCamelCase : Any =BarkCoarseGenerationConfig
elif model_type == "fine":
_lowerCamelCase : Optional[int] =BarkFineModel
_lowerCamelCase : Tuple =BarkFineConfig
_lowerCamelCase : List[str] =BarkFineGenerationConfig
else:
raise NotImplementedError()
_lowerCamelCase : Optional[Any] =F'''{model_type}_small''' if use_small else model_type
_lowerCamelCase : Any =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowerCAmelCase_ ):
logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info['repo_id'] , model_info['file_name'] )
_lowerCamelCase : Any =torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
# this is a hack
_lowerCamelCase : Union[str, Any] =checkpoint["model_args"]
if "input_vocab_size" not in model_args:
_lowerCamelCase : str =model_args["vocab_size"]
_lowerCamelCase : Union[str, Any] =model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_lowerCamelCase : Union[str, Any] =model_args.pop('n_head' )
_lowerCamelCase : int =model_args.pop('n_embd' )
_lowerCamelCase : Any =model_args.pop('n_layer' )
_lowerCamelCase : List[str] =ConfigClass(**checkpoint['model_args'] )
_lowerCamelCase : Optional[Any] =ModelClass(config=lowerCAmelCase_ )
_lowerCamelCase : Tuple =GenerationConfigClass()
_lowerCamelCase : List[str] =model_generation_config
_lowerCamelCase : Optional[int] =checkpoint["model"]
# fixup checkpoint
_lowerCamelCase : Optional[int] ="_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(lowerCAmelCase_ ):
# replace part of the key with corresponding layer name in HF implementation
_lowerCamelCase : Tuple =k[len(lowerCAmelCase_ ) :]
for old_layer_name in new_layer_name_dict:
_lowerCamelCase : int =new_k.replace(lowerCAmelCase_ , new_layer_name_dict[old_layer_name] )
_lowerCamelCase : int =state_dict.pop(lowerCAmelCase_ )
_lowerCamelCase : Optional[int] =set(state_dict.keys() ) - set(model.state_dict().keys() )
_lowerCamelCase : str ={k for k in extra_keys if not k.endswith('.attn.bias' )}
_lowerCamelCase : Any =set(model.state_dict().keys() ) - set(state_dict.keys() )
_lowerCamelCase : List[Any] ={k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(lowerCAmelCase_ ) != 0:
raise ValueError(F'''extra keys found: {extra_keys}''' )
if len(lowerCAmelCase_ ) != 0:
raise ValueError(F'''missing keys: {missing_keys}''' )
model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
_lowerCamelCase : str =model.num_parameters(exclude_embeddings=lowerCAmelCase_ )
_lowerCamelCase : Union[str, Any] =checkpoint["best_val_loss"].item()
logger.info(F'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(lowerCAmelCase_ , 3 )} loss''' )
model.eval()
model.to(lowerCAmelCase_ )
del checkpoint, state_dict
return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one Bark sub-model checkpoint to HF format and save it.

    Loads the suno checkpoint as a HF Bark sub-model, sanity-checks parameter
    count and output logits against the original suno implementation, then
    writes the converted model to ``pytorch_dump_folder_path``.

    Args:
        pytorch_dump_folder_path: Output directory for the converted model.
        use_small: Convert the "small" checkpoint variant instead of the large one.
        model_type: One of "text", "coarse" or "fine".

    Raises:
        NotImplementedError: For an unknown ``model_type``.
        ValueError: If the converted model disagrees with the original one.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        # the suno loader wraps the text model in a dict
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    """Assemble a full HF ``BarkModel`` from the three converted sub-models plus Encodec.

    Args:
        semantic_path: Directory of the converted semantic (text) sub-model.
        coarse_path: Directory of the converted coarse acoustics sub-model.
        fine_path: Directory of the converted fine acoustics sub-model.
        append_text: Suffix appended to ``folder_path`` for the output directory.
        hub_path: Hub repo id used when pushing the assembled model.
        folder_path: Base output directory.
    """
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    # CLI entry point: convert a single Bark sub-model checkpoint.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('model_type', type=str, help='text, coarse or fine.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import (
    SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
    SquadVaProcessor,
    squad_convert_examples_to_features,
)
# Module-level logger plus the model types supporting question answering.
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    """Dataset split selector; values double as the cache-file name component."""

    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    """PyTorch dataset of SQuAD features, with on-disk caching of the expensive
    example-to-feature conversion."""

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ) -> None:
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        # v2 adds unanswerable questions; pick the matching processor.
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert a single cached feature to model-ready tensors.
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        # Some architectures do not use token_type_ids at all.
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    """Simple symmetric XOR cipher.

    The same key is used for encryption and decryption (XOR is its own
    inverse). A key of 0 falls back to the instance key given at construction
    time, and finally to 1.
    """

    def __init__(self, key: int = 0):
        """Store a default key, used when a method is called with key 0."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt ``content`` with ``key``; returns the list of cipher characters."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Decrypt a list of cipher characters back to plain characters."""
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """Encrypt ``content`` and return the result as a string."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Decrypt ``content`` and return the result as a string."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt ``file`` into "encrypt.out"; returns False on I/O failure."""
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt ``file`` into "decrypt.out"; returns False on I/O failure."""
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger plus the map of pretrained GIT configuration files.
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration of the vision encoder of a GIT model.

    Stores the hyper-parameters of the vision tower; all arguments default to
    the ``microsoft/git-base`` architecture.
    """

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load a vision config, unwrapping it from a full GIT config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration of a full GIT model (vision encoder + text decoder)."""

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput(BaseOutput):
    """Output of the 1D UNet.

    Attributes:
        sample: The denoised sample tensor.
    """

    sample: torch.FloatTensor
class UNetaDModel(ModelMixin, ConfigMixin):
    r"""A 1D UNet that maps a noisy sample plus a timestep to a sample-shaped output.

    The network is built from configurable down / mid / up / out blocks; the
    timestep is embedded either with Fourier features or sinusoidal positions.
    """

    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: Tuple[str] = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                # the first block also receives the extra conditioning channels
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(up_block_types) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNetaDOutput, Tuple]:
        r"""Run the UNet: embed the timestep, then down -> mid -> up -> out blocks.

        Args:
            sample: Noisy input tensor.
            timestep: Scalar or tensor diffusion timestep.
            return_dict: If False, return a plain ``(sample,)`` tuple.
        """
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            # broadcast the raw projection over the sample's length dimension
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNetaDOutput(sample=sample)
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (0.0 .. 1.0).

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    # Run the doctest examples in this module, then print a sample score.
    import doctest
    doctest.testmod()
    print(jaro_winkler("hello", "world"))
'''simple docstring'''
def abbr(a: str, b: str) -> bool:
    """Return True if string ``a`` can be abbreviated to string ``b``.

    An abbreviation upper-cases zero or more lowercase letters of ``a`` and
    deletes every remaining lowercase letter (HackerRank "Abbreviation").

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    # dp[i][j]: can a[:i] be abbreviated to b[:j]?
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # upper-case a[i] to match b[j]
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # delete a[i] (only lowercase letters may be deleted)
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    # Self-test: run the doctest examples embedded in this module.
    import doctest
    doctest.testmod()
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)

# suno checkpoint key fragments -> HF Bark layer names
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# hub locations of the original suno checkpoints, per sub-model and size
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path of the requested suno checkpoint."""
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    """Download ``file_name`` from hub repo ``from_hf_path`` into the local cache."""
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load one suno Bark sub-model checkpoint as a HF Bark sub-model.

    Downloads the checkpoint into the cache if needed, translates the suno
    model arguments and state-dict keys to the HF naming scheme, and returns
    the loaded model in eval mode on ``device``.

    Args:
        ckpt_path: Expected local path of the suno checkpoint.
        device: Device to load the converted model on (and map tensors to).
        use_small: Whether this is the "small" checkpoint variant.
        model_type: One of "text", "coarse" or "fine".

    Raises:
        NotImplementedError: For an unknown ``model_type``.
        ValueError: If the translated state dict has extra or missing keys.
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])

            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict

    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one Bark sub-model checkpoint to HF format and save it.

    Loads the suno checkpoint as a HF Bark sub-model, sanity-checks parameter
    count and output logits against the original suno implementation, then
    writes the converted model to ``pytorch_dump_folder_path``.

    Args:
        pytorch_dump_folder_path: Output directory for the converted model.
        use_small: Convert the "small" checkpoint variant instead of the large one.
        model_type: One of "text", "coarse" or "fine".

    Raises:
        NotImplementedError: For an unknown ``model_type``.
        ValueError: If the converted model disagrees with the original one.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        # the suno loader wraps the text model in a dict
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    """Assemble a full BarkModel from three converted sub-models plus Encodec.

    Args:
        semantic_path: directory of the converted semantic (text) sub-model.
        coarse_path: directory of the converted coarse-acoustics sub-model.
        fine_path: directory of the converted fine-acoustics sub-model.
        append_text: suffix appended to `folder_path` to build the dump directory.
        hub_path: Hub repo id to push the assembled model to.
        folder_path: base output directory.
    """
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    # sub-model configs are read from each converted checkpoint directory
    semantic_config = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarse_config = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fine_config = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codec_config = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarse_acoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fine_acoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semantic_config, coarse_config, fine_config, codec_config
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarse_acoustic.generation_config, fine_acoustic.generation_config
    )
    bark = BarkModel(bark_config)
    # plug the already-converted weights into the composite model
    bark.semantic = semantic
    bark.coarse_acoustics = coarse_acoustic
    bark.fine_acoustics = fine_acoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    # original assigned the parse result to a throwaway name, leaving `args` undefined
    args = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 666 | 0 |
"""Unit tests for ``file_transfer.send_file`` using mocked sockets and file I/O."""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    """send_file should accept one connection, stream the file, then shut everything down.

    `file` mocks `builtins.open`, `sock` mocks `socket.socket` (innermost
    decorator binds to the first parameter).
    """
    # ===== initialization =====
    conn = Mock()
    # accept() returns a (connection, address) pair
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])  # one chunk of data, then EOF
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()


from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import table: module name -> public symbols. Referenced by _LazyModule below.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is present: expose the modeling symbols as well
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    # replace this module with a lazy proxy so submodules load on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 0 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the leftmost index at which `item` can be inserted into
    `sorted_collection` while keeping it sorted.

    Args:
        sorted_collection: ascending-sorted list to search.
        item: value to locate an insertion point for.
        lo: lower bound of the search slice (inclusive).
        hi: upper bound of the search slice (exclusive); negative means len(sorted_collection).
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2  # overflow-safe midpoint
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the rightmost index at which `item` can be inserted into
    `sorted_collection` while keeping it sorted (after any equal elements).
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:  # <= keeps equal items to the left
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` into `sorted_collection` in place, before any equal elements."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` into `sorted_collection` in place, after any equal elements."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def __magic_name__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : int ) -> Optional[Any]:
__lowerCamelCase = 0
__lowerCamelCase = len(lowerCAmelCase_ ) - 1
while left <= right:
__lowerCamelCase = left + (right - left) // 2
__lowerCamelCase = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__lowerCamelCase = midpoint - 1
else:
__lowerCamelCase = midpoint + 1
return None
def __magic_name__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : int ) -> Tuple:
__lowerCamelCase = bisect.bisect_left(lowerCAmelCase_ , lowerCAmelCase_ )
if index != len(lowerCAmelCase_ ) and sorted_collection[index] == item:
return index
return None
def __magic_name__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int:
if right < left:
return None
__lowerCamelCase = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , midpoint - 1 )
else:
return binary_search_by_recursion(lowerCAmelCase_ , lowerCAmelCase_ , midpoint + 1 , lowerCAmelCase_ )
if __name__ == "__main__":
    # Interactive demo: the original assigned every value to one shadowed
    # placeholder, leaving `collection`, `target` and `result` undefined.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 298 | from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)  # module-level logger (HF convention)
class snake_case__ ( _UpperCamelCase ):
    r"""
    Image processor implementing the classic ImageNet-style pipeline:
    shortest-edge resize, center crop, rescale to [0, 1] and normalization,
    plus post-processing of semantic-segmentation logits.
    """

    # Keys of the model inputs this processor produces.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        """Store the default preprocessing configuration.

        The original code repeated a single placeholder name for every
        parameter (a SyntaxError); names restored from the attribute
        assignments in the body.
        """
        super().__init__(**kwargs)
        # Defaults: resize so the shortest edge is 256, then crop to 224x224.
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` so its shortest edge matches `size["shortest_edge"]`, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255 to map into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` channel-wise with the given `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured pipeline to one image or a batch.

        Per-call arguments override the defaults stored on the instance.
        Returns a `BatchFeature` with a "pixel_values" entry.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Turn model logits into per-image semantic segmentation maps.

        When `target_sizes` is given, each logits map is bilinearly resized to
        the corresponding (height, width) before taking the argmax.
        """
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 666 | 0 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)  # module-level logger; original shadowed it with the next assignment
AUTO = tf.data.AUTOTUNE  # let tf.data choose parallelism and prefetch depth
def parse_args():
    """Parse command-line arguments for TPU masked-language-model training.

    Returns:
        argparse.Namespace with all training options; `--output_dir` is required.
    """
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    """Resolve, connect to and initialize the TPU system.

    Args:
        args: parsed CLI namespace providing `tpu_name`, `tpu_zone`, `gcp_project`.

    Returns:
        The TPUClusterResolver for the initialized TPU.

    Raises:
        RuntimeError: when the TPU cannot be resolved.
    """
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            # no name given: rely on the environment (e.g. Colab) to locate the TPU
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    """Sum the sample counts encoded in tfrecord shard filenames.

    Each shard is named `...-<shard>-<count>.tfrecord`; the trailing number is
    the number of samples in that shard.
    """
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    """Build a batched tf.data pipeline from a list of tfrecord shard paths.

    Args:
        records: list of tfrecord file paths.
        decode_fn: parses a serialized example into feature tensors.
        mask_fn: applies MLM masking to a decoded batch.
        batch_size: global batch size (remainder batches are dropped).
        shuffle: whether to shuffle shards and samples.
        shuffle_buffer_size: sample shuffle buffer; required when `shuffle` is True.
    """
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))  # shuffle shard order
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=tf.data.AUTOTUNE)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=tf.data.AUTOTUNE)
    if shuffle:
        assert shuffle_buffer_size is not None
        # use the parameter (the original read a global instead, ignoring the argument)
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=tf.data.AUTOTUNE)
    dataset = dataset.prefetch(tf.data.AUTOTUNE)
    return dataset
def main(args):
    """Train a masked language model on TPU (or CPU/GPU with --no_tpu).

    Builds the distribution strategy, model, optimizer and tf.data pipelines
    from the parsed CLI `args`, trains, and saves to `args.output_dir`.
    """
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    # the model's vocab must match the tokenizer we actually use
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        # Schema of the serialized examples written by prepare_tfrecord_shards.py.
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )
    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )
    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    # original assigned the parse result to a throwaway name, leaving `args` undefined
    args = parse_args()
    main(args)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table: module name -> public symbols. Each optional-dependency
# branch ADDS an entry (the original overwrote a single variable each time,
# and `_import_structure` itself was never defined).
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    # replace this module with a lazy proxy so submodules load on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Config helper for the image-processor tests.

    Holds default constructor kwargs and computes the output sizes the
    processor's shortest-edge resize is expected to produce.
    (Name restored: the test class instantiates `ConditionalDetrImageProcessingTester`.)
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # NOTE: mutable list defaults kept for parity with the upstream tester.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct the processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge resizing.

        For a batch, each image is resized independently and the batch is
        padded to the per-dimension maxima.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( _UpperCamelCase , unittest.TestCase ):
_lowercase : Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Tuple ):
__A : List[str] = ConditionalDetrImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : Any ):
__A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , """image_mean""" ) )
self.assertTrue(hasattr(A__ , """image_std""" ) )
self.assertTrue(hasattr(A__ , """do_normalize""" ) )
self.assertTrue(hasattr(A__ , """do_resize""" ) )
self.assertTrue(hasattr(A__ , """size""" ) )
def lowerCAmelCase_ ( self : List[str] ):
__A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , A__ )
__A : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A__ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , A__ )
def lowerCAmelCase_ ( self : str ):
pass
def lowerCAmelCase_ ( self : Dict ):
__A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A : Optional[Any] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : List[Any] = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
__A : int = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : int ):
__A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
__A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A : List[str] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Optional[int] = image_processing(A__ , return_tensors="""pt""" ).pixel_values
__A : Dict = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Tuple ):
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A : List[Any] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Any = image_processing(A__ , return_tensors="""pt""" ).pixel_values
__A : int = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self : List[str] ):
__A : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__A : Optional[Any] = json.loads(f.read() )
__A : int = {"image_id": 3_9769, "annotations": target}
# encode them
__A : Optional[int] = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
__A : Any = image_processing(images=A__ , annotations=A__ , return_tensors="""pt""" )
# verify pixel values
__A : List[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , A__ )
__A : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A__ , atol=1e-4 ) )
# verify area
__A : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A__ ) )
# verify boxes
__A : Any = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A__ )
__A : str = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A__ , atol=1e-3 ) )
# verify image_id
__A : List[Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A__ ) )
# verify is_crowd
__A : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A__ ) )
# verify class_labels
__A : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A__ ) )
# verify orig_size
__A : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A__ ) )
# verify size
__A : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A__ ) )
@slow
def lowerCAmelCase_ ( self : int ):
__A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__A : Any = json.loads(f.read() )
__A : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
__A : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__A : Union[str, Any] = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
__A : str = image_processing(images=A__ , annotations=A__ , masks_path=A__ , return_tensors="""pt""" )
# verify pixel values
__A : int = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , A__ )
__A : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A__ , atol=1e-4 ) )
# verify area
__A : Optional[int] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A__ ) )
# verify boxes
__A : str = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A__ )
__A : str = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A__ , atol=1e-3 ) )
# verify image_id
__A : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A__ ) )
# verify is_crowd
__A : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A__ ) )
# verify class_labels
__A : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A__ ) )
# verify masks
__A : Union[str, Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , A__ )
# verify orig_size
__A : Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A__ ) )
# verify size
__A : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A__ ) )
from ...configuration_utils import PretrainedConfig
# Map of pretrained TAPAS checkpoint names to their hosted config.json URLs.
# NOTE(review): bound to an obfuscated name; nothing in the visible file reads
# this mapping — presumably it was the config archive map. Verify before renaming.
UpperCAmelCase = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}
class TapasConfig(_UpperCamelCase):
    """Configuration class for TAPAS models.

    Holds the BERT-style encoder hyperparameters plus the fine-tuning
    (cell-selection / aggregation) hyperparameters read by the TAPAS heads.
    Fixes vs. original: the constructor repeated one parameter name for every
    argument (a SyntaxError) and the aggregation-label normalization used the
    class object instead of the dict key in ``int(...)``.
    """

    # Registry key expected by PretrainedConfig-based dispatch.
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        # JSON round-trips turn int keys into strings; normalize back to ints.
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Module-level logger used by the conversion helpers below.
# (Original bound all three module globals to the same throwaway name `a`,
# leaving `logger`, `MAPPING` and `TOP_LEVEL_KEYS` undefined at their call sites.)
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> Hugging Face parameter path.
# "*" is a per-layer wildcard filled in with the layer index.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# HF keys that live at the top level of the model (no "wav2vec2_conformer." prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk ``key`` (dot-separated attribute path) from ``hf_pointer`` and copy
    ``value`` into the tensor selected by ``weight_type``.

    Raises ValueError when the fairseq tensor shape does not match the HF slot.
    (Original def duplicated one parameter name — a SyntaxError — and assigned
    every branch to a dead local instead of the model tensor, making it a no-op;
    its call sites also used the name ``set_recursively`` which was never defined.)
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}"""
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every tensor from the fairseq state dict into ``hf_model``.

    Conv feature-extractor weights go through :func:`load_conv_layer`; all other
    weights are matched against ``MAPPING``. Unmatched names are collected and
    logged. (Original had a duplicated parameter name — SyntaxError — and bound
    every intermediate to dead locals, so ``is_used``/``mapped_key`` were undefined.)
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    # NOTE(review): attribute name follows the HF Wav2Vec2Conformer model;
    # confirm against the model class actually constructed by the caller.
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched key fragment.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor.

    ``full_name`` looks like ``...conv_layers.<layer>.<type>...``; type 0 is the
    conv itself, type 2 a layer norm (or group norm on layer 0). Tensors with no
    HF counterpart are appended to ``unused_weights``. (Original duplicated a
    parameter name — SyntaxError — and never wrote into the model tensors.)
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak a fairseq Wav2Vec2-Conformer checkpoint into a
    Transformers checkpoint saved under ``pytorch_dump_folder_path``.

    When ``is_finetuned`` a CTC head + tokenizer/processor are built from
    ``dict_path``; otherwise a pre-training model is converted. (Original had
    duplicated parameter names — SyntaxError — and every intermediate bound to a
    dead local, so ``config``/``target_dict``/``hf_wav2vec`` were undefined.)
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        # NOTE(review): config field name per HF Wav2Vec2Conformer — confirm.
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = WavaVecaConformerForCTC(config)
    else:
        hf_wav2vec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # (Original bound the parser and the parsed args to a throwaway local `a`,
    # so `parser.add_argument(...)` and `args.*` raised NameError.)
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam-based dataset builder over flat {"content": str} examples.

    Renamed from an obfuscated identifier to the name the tests below actually
    instantiate; method names restored to the BeamBasedBuilder hooks (the
    original gave all three methods the same name, so only the last survived).
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        # Imported lazily so the module loads without apache_beam installed.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam-based dataset builder over nested {"a": {"b": [str]}} examples.

    Renamed from an obfuscated identifier to the name the tests below actually
    instantiate; method names restored to the BeamBasedBuilder hooks.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        # Imported lazily so the module loads without apache_beam installed.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Return (index, example) pairs for the flat dummy dataset.

    Renamed from an obfuscated identifier: the builders and tests call
    ``get_test_dummy_examples``, which was never defined.
    """
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    """Return (index, example) pairs for the nested dummy dataset.

    Renamed from an obfuscated identifier: the builders and tests call
    ``get_test_nested_examples``, which was never defined (and the original def
    name collided with the helper above, shadowing it).
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    """End-to-end tests for the Beam-based builders above.

    Method names restored to unique ``test_*`` identifiers: the original gave
    every method the same name (only the last def survived, and none matched
    unittest's ``test`` prefix), and read locals (``builder``, ``dset``,
    ``expected_num_examples``) that were never bound.
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        # Keep a handle on the real writer before it is patched below.
        original_write_to_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two shards to exercise the sharded output path.
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # Original checked shard 00000 twice; verify the second shard instead.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module globals for the BARThez tokenizer. The original assigned every one of
# these to the same obfuscated name, so `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP`, `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` and
# `SPIECE_UNDERLINE` — all read by the tokenizer class below — were undefined.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

# SentencePiece's visible word-boundary marker.
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(_UpperCamelCase):
    """BARThez tokenizer backed by a SentencePiece BPE model.

    Fixes vs. original: ``__init__`` repeated one parameter name for every
    argument (SyntaxError); the four class attributes all shared one name so
    only the last survived; every method shared one name; locals were bound to
    dead identifiers (e.g. ``__setstate__`` never assigned ``self.__dict__``).
    Method/attribute names are restored to the PreTrainedTokenizer hooks.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # fairseq reserves the first ids for its special tokens.
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_offset = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return all-zero token type ids (BARThez does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, honoring the fairseq special tokens."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # SentencePiece returns 0 for unknown pieces.
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str), honoring the fairseq special tokens."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Detokenize, decoding non-special runs with SentencePiece."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on setstate.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the SentencePiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the once-per-process deprecation-warning registry so each test
    re-triggers the warning. (Original named the parameter with an obfuscated
    identifier while the body read ``monkeypatch`` — NameError.)"""
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())
@pytest.fixture
def mock_hfh(monkeypatch):
    """Replace the Hub client used by ``datasets.inspect`` with a stub that
    lists a fixed set of metrics, so no network access happens."""

    class MetricMock:
        def __init__(self, metric_id):
            # Hub metric objects expose their repo id via ``.id``.
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())
@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def SCREAMING_SNAKE_CASE_(func, args, tmp_path):
    """Each deprecated metric entry point must emit the evaluate-migration warning.

    Fix: the original def repeated one parameter name five times (a
    SyntaxError) and referenced undefined locals. `func`/`args` come from the
    parametrize decorator and `tmp_path` is pytest's built-in fixture; the two
    module fixtures above shadow each other's name, so they cannot both be
    requested here until they get distinct names.
    """
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    # The deprecation message points users at the evaluate library.
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 666 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# Lazy-import structure for the GPT-NeoX-Japanese model package.
UpperCamelCase = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: the modeling entries must be ADDED to the import structure; the
    # previous code rebound the whole dict to a bare list, discarding the
    # config/tokenizer entries.
    UpperCamelCase["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    # Fix: _LazyModule was handed an undefined name `_import_structure`;
    # pass the structure dict defined above and install the lazy module.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], UpperCamelCase, module_spec=__spec__)
| 208 | from __future__ import annotations
import bisect
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the leftmost index at which `item` can be inserted into
    `sorted_collection` (ascending) while keeping it sorted.

    `hi = -1` is a sentinel meaning "end of the list". Fix: the original
    signature repeated one parameter name (a SyntaxError) and never bound
    `hi`/`mid`, so the search could not run.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2  # avoids overflow in languages without big ints; idiomatic here
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the rightmost index at which `item` can be inserted into
    `sorted_collection` (ascending) while keeping it sorted.

    `hi = -1` is a sentinel meaning "end of the list". Fix: duplicate
    parameter names (SyntaxError) and unbound `hi`/`mid` in the original.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        # `<=` (vs `<` in the left variant) skips over equal elements.
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` into `sorted_collection` in place, before any equal elements.

    Uses the stdlib `bisect` module (already imported at file top) since the
    original called an undefined `bisect_left`. `hi = -1` means "end of list"
    and is resolved before calling bisect, whose own convention differs.
    """
    if hi < 0:
        hi = len(sorted_collection)
    sorted_collection.insert(bisect.bisect_left(sorted_collection, item, lo, hi), item)
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` into `sorted_collection` in place, after any equal elements.

    Uses the stdlib `bisect` module (already imported at file top) since the
    original called an undefined `bisect_right`. `hi = -1` means "end of list".
    """
    if hi < 0:
        hi = len(sorted_collection)
    sorted_collection.insert(bisect.bisect_right(sorted_collection, item, lo, hi), item)
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search.

    Returns the index of `item` in the ascending `sorted_collection`, or
    None when absent. Fix: the original never bound `left`/`right`/`midpoint`
    (all assignments targeted a throwaway name), so every lookup raised
    NameError.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int) -> int | None:
    """Binary search via the stdlib `bisect` module.

    Returns the index of `item` or None when absent. Fix: the original
    discarded the bisect result and then read an undefined `index`.
    """
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over `sorted_collection[left:right + 1]`.

    Returns the index of `item` or None when absent. Fix: duplicate parameter
    names (SyntaxError), an unbound `midpoint`, and recursive calls to a
    nonexistent `binary_search_by_recursion` — the recursion now uses this
    function's own name so the block is self-contained.
    """
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return SCREAMING_SNAKE_CASE_(sorted_collection, item, left, midpoint - 1)
    else:
        return SCREAMING_SNAKE_CASE_(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    # Interactive driver: read a sorted list and a target, then search.
    # Fix: the original rebound one name for every value and then called an
    # undefined `binary_search`; the last search function defined above (the
    # recursive one, taking explicit bounds) is used instead.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = SCREAMING_SNAKE_CASE_(collection, target, 0, len(collection) - 1)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 666 | 0 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    'kwargs, expected' , [
        ({'num_shards': 0, 'max_num_jobs': 1}, []),
        ({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        # Fix: the comprehension referenced an undefined `lowerCAmelCase_`;
        # each shard maps to its own single-element range.
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i , i + 1 ) for i in range(10 )]),
        ({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
        ({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
        ({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
    ] , )
def a_(kwargs, expected):
    """_distribute_shards must split `num_shards` into at most `max_num_jobs`
    contiguous ranges. (Fix: the original def repeated one parameter name —
    a SyntaxError — and referenced undefined locals.)"""
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    'gen_kwargs, max_num_jobs, expected' , [
        ({'foo': 0}, 10, [{'foo': 0}]),
        ({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
        ({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
        ({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
        ({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
    ] , )
def a_(gen_kwargs, max_num_jobs, expected):
    """_split_gen_kwargs must partition the shard list across at most
    `max_num_jobs` kwarg dicts. (Fix: duplicate parameter names and undefined
    locals in the original.)"""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    'gen_kwargs, expected' , [
        ({'foo': 0}, 1),
        ({'shards': [0]}, 1),
        ({'shards': [0, 1, 2, 3]}, 4),
        ({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
        ({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
        # Two shard lists of different lengths are ambiguous -> RuntimeError.
        ({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
    ] , )
def a_(gen_kwargs, expected):
    """_number_of_shards_in_gen_kwargs returns the shard count, or raises
    when multiple list arguments disagree. (Fix: duplicate parameter names
    and undefined locals in the original.)"""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 464 | import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case__(_UpperCamelCase):
    """Unconditional image generation pipeline: DDIM sampling in the latent
    space of a VQ-VAE, followed by VQ-VAE decoding to pixel space.

    Fixes vs. the previous version: both method signatures repeated one
    parameter name (a SyntaxError), and every intermediate result was bound
    to a throwaway name, leaving `latents`, `accepts_eta`, `image` etc.
    undefined at use sites.
    """

    def __init__(self, vqvae: VQModel, unet: UNetaDModel, scheduler: DDIMScheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Sample `batch_size` images; returns ImagePipelineOutput (or a tuple
        when `return_dict=False`)."""
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 666 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
# Module-level logger for this file.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __magic_name__(_UpperCamelCase):
    """Deprecated alias kept for backward compatibility; use
    PerceiverImageProcessor instead."""

    def __init__(self, *args, **kwargs) -> None:
        # Fix: the signature declared *args and **kwargs under the same name
        # (a SyntaxError) and passed an undefined name as the warning
        # category; deprecation notices use FutureWarning.
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead." , FutureWarning , )
        super().__init__(*args, **kwargs)
| 163 | from decimal import Decimal, getcontext
from math import ceil, factorial
def SCREAMING_SNAKE_CASE_(precision: int) -> str:
    """Return pi to `precision` significant digits via the Chudnovsky series.

    Raises TypeError for non-integers and ValueError for precision < 1.
    Fix: the original never set the Decimal context precision and left the
    series accumulators (`linear_term`, `exponential_term`, `partial_sum`,
    `multinomial_term`) unbound.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    # Each Chudnovsky term contributes ~14 digits, so ceil(precision/14)
    # iterations suffice.
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # Drop the last digit, which may be affected by rounding.
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    # Fix: the original printed via an undefined `pi(n)`; call the function
    # defined above under its actual name.
    n = 50
    print(f"The first {n} digits of pi is: {SCREAMING_SNAKE_CASE_(n)}")
| 666 | 0 |
"""simple docstring"""
def snake_case(a: int, b: int) -> int:
    """Multiply two non-negative integers using binary (Russian-peasant)
    multiplication: add shifted copies of `a` for each set bit of `b`.

    Fix: the original repeated one parameter name (a SyntaxError) and read
    an undefined accumulator `res`.
    """
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def snake_case(a: int, b: int, c: int) -> int:
    """Binary multiplication of `a` and `b` modulo `c`.

    The accumulator is reduced mod `c` at every addition so it stays small.
    Fix: duplicate parameter names (SyntaxError) and an undefined `res`
    in the original.
    """
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
def SCREAMING_SNAKE_CASE_(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with `n` digits
    (Project Euler problem 25; indexing starts with F1 = F2 = 1).

    Fix: the original bound every value to a throwaway name and then read
    undefined `fa`, `f`, `index`, `i`, `n`.
    """
    f1, f2 = 1, 1
    index = 2
    while True:
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        # Digit count of the current Fibonacci number.
        if len(str(f)) == n:
            break
    return index
if __name__ == "__main__":
    # Fix: the original called an undefined `solution`; use the function
    # defined above under its actual name.
    print(SCREAMING_SNAKE_CASE_(int(str(input()).strip())))
| 666 | 0 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
# Make the shared test helpers importable (provides CustomConfig below).
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))

from test_module.custom_configuration import CustomConfig  # noqa E402

# Path to a local dummy RoBERTa-style config fixture used by the tests below.
__magic_name__ = get_tests_dir('fixtures/dummy-config.json')
class __lowerCAmelCase(unittest.TestCase):
    """Tests for the AutoConfig factory: lookup by model shortcut, local file
    and hub identifier, custom-config registration, hub error messages, and
    remote (trust_remote_code) configs.

    Fixes vs. the previous version: every method shared the name `_a` (so
    only the last one survived class creation and none were collected by
    unittest), and the obfuscation left `A__`/`A_` placeholders undefined;
    names and assertion targets are restored from the upstream transformers
    test suite.
    """

    def setUp(self):
        # Disable the interactive remote-code prompt timeout so the
        # trust_remote_code tests fail fast instead of waiting.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto"""))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("""bert-base-uncased""")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        # __magic_name__ is the local dummy-config fixture path defined above.
        config = AutoConfig.from_pretrained(__magic_name__)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("""roberta""")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, """fake-roberta""")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, """config.json"""), """w""") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("""custom""", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("""model""", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("""bert""", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            # Keep the global registry clean for other tests.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, """bert-base is not a local folder and is not a valid model identifier"""):
            AutoConfig.from_pretrained("""bert-base""")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"""):
            AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="""aaaaaa""")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""", ):
            AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""")

    def test_from_pretrained_dynamic_config(self):
        # Without trust_remote_code the prompt times out (see setUp) -> error.
        with self.assertRaises(ValueError):
            AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""", trust_remote_code=False)

        config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, """NewModelConfig""")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, """NewModelConfig""")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("""new-model""", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""")
            self.assertEqual(config.__class__.__name__, """NewModelConfigLocal""")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, """NewModelConfigLocal""")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, """NewModelConfig""")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 665 | from __future__ import annotations
def SCREAMING_SNAKE_CASE_(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of `nums[left..right]` by divide and conquer.

    Raises ValueError on an empty sequence and IndexError when either bound
    is outside the list. Fix: the original signature repeated one parameter
    name (a SyntaxError), never bound `mid`, and recursed through a
    nonexistent `find_max` — the recursion now uses this function's own name.
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = SCREAMING_SNAKE_CASE_(nums, left, mid)  # find max in range[left, mid]
    right_max = SCREAMING_SNAKE_CASE_(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 666 | 0 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCamelCase(df, partition_order):
    """Build the expected (row_id, row_dict) pairs for `df`, visiting Spark
    partitions in `partition_order`.

    Row ids follow the `{partition}_{row_index}` scheme used by
    SparkExamplesIterable. Fix: the original repeated one parameter name
    (a SyntaxError) and never bound the accumulator or the collected rows.
    """
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition_rows = df.where(f'''SPARK_PARTITION_ID() = {part_id}''').collect()
        for row_idx, row in enumerate(partition_rows):
            expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase():
    """Repartitioning must split the dataframe so each shard fits max_shard_size.

    Fix: the original bound every value to one throwaway name and then read
    undefined `spark`/`df`/`spark_builder`.
    """
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase():
    """Checks that _generate_iterable_examples yields rows in the requested
    (reversed) partition order."""
    # NOTE(review): the assignment targets below were all mangled to one name
    # and several call arguments to `lowerCAmelCase_`, so `spark`,
    # `generate_fn` and `expected_row_ids_and_row_dicts` are undefined at
    # runtime; the helper it needs is also shadowed (every def in this file
    # is named `_UpperCamelCase`). Restore from the upstream datasets
    # tests/packaged_modules/test_spark.py.
    UpperCAmelCase__ : Union[str, Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    UpperCAmelCase__ : Optional[int] = spark.range(1_0 ).repartition(2 )
    UpperCAmelCase__ : List[str] = [1, 0]
    UpperCAmelCase__ : Optional[int] = _generate_iterable_examples(lowerCAmelCase_ , lowerCAmelCase_ ) # Reverse the partitions.
    UpperCAmelCase__ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCAmelCase_ , lowerCAmelCase_ )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        UpperCAmelCase__ : Tuple = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase():
    """A single-partition dataframe must iterate as one shard with
    `{partition}_{index}` row ids.

    Fix: the original never bound `spark`, `df` or `it` (all assignments
    targeted a throwaway name).
    """
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f'''0_{i}'''
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase():
    """Checks that shuffle_data_sources reorders the partitions using the
    (mocked) numpy random generator."""
    # NOTE(review): the assignment targets below were mangled to one name and
    # several arguments to `lowerCAmelCase_`, so `spark`, `df`, `shuffled_it`
    # and `expected_row_ids_and_row_dicts` are undefined at runtime; it also
    # needs the partition-order helper, whose name is shadowed by the other
    # defs in this file. Restore from the upstream datasets test_spark.py.
    UpperCAmelCase__ : Optional[int] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    UpperCAmelCase__ : Tuple = spark.range(3_0 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("""numpy.random.Generator""" ) as generator_mock:
        UpperCAmelCase__ : Optional[int] = lambda UpperCamelCase__ : x.reverse()
        UpperCAmelCase__ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCAmelCase_ , [2, 1, 0] )
        UpperCAmelCase__ : Any = SparkExamplesIterable(lowerCAmelCase_ ).shuffle_data_sources(lowerCAmelCase_ )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(lowerCAmelCase_ ):
            UpperCAmelCase__ : Dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase():
    """Checks that shard_data_sources splits 4 partitions across 2 workers
    (worker 0 gets partitions [0, 2], worker 1 gets [1, 3])."""
    # NOTE(review): the assignment targets and several call arguments were
    # mangled, so `spark`, `df`, both `shard_it_a` iterables and the expected
    # row lists are undefined at runtime; the partition-order helper's name
    # is shadowed as well. Restore from the upstream datasets test_spark.py.
    UpperCAmelCase__ : List[str] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    UpperCAmelCase__ : str = spark.range(2_0 ).repartition(4 )
    # Partitions 0 and 2
    UpperCAmelCase__ : Optional[Any] = SparkExamplesIterable(lowerCAmelCase_ ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_a.n_shards == 2
    UpperCAmelCase__ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCAmelCase_ , [0, 2] )
    for i, (row_id, row_dict) in enumerate(lowerCAmelCase_ ):
        UpperCAmelCase__ : int = expected_row_ids_and_row_dicts_a[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    UpperCAmelCase__ : str = SparkExamplesIterable(lowerCAmelCase_ ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_a.n_shards == 2
    UpperCAmelCase__ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCAmelCase_ , [1, 3] )
    for i, (row_id, row_dict) in enumerate(lowerCAmelCase_ ):
        UpperCAmelCase__ : Optional[Any] = expected_row_ids_and_row_dicts_a[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase():
    """Repartitioning must never create more partitions than there are rows.

    Fix: the original never bound `spark`, `df` or `spark_builder`.
    """
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
# Module logger and the RoBERTa fast-tokenizer lookup tables.
# NOTE(review): every table below is rebound to the SAME name `UpperCAmelCase`,
# so only the last assignment (the max-length map) survives, and the names the
# tokenizer class expects (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) are undefined — the original
# distinct constant names need restoring.
UpperCAmelCase = logging.get_logger(__name__)

# vocab-file name templates
UpperCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# per-checkpoint resolved vocab / merges / tokenizer URLs
UpperCAmelCase = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

# maximum sequence lengths per checkpoint
UpperCAmelCase = {
    "roberta-base": 5_1_2,
    "roberta-large": 5_1_2,
    "roberta-large-mnli": 5_1_2,
    "distilroberta-base": 5_1_2,
    "roberta-base-openai-detector": 5_1_2,
    "roberta-large-openai-detector": 5_1_2,
}
class snake_case__(_UpperCamelCase):
    """Fast (tokenizers-backed) RoBERTa tokenizer: byte-level BPE with the
    `<s>`/`</s>` special-token scheme.

    Fixes vs. the previous version: `__init__` repeated one parameter name
    many times (a SyntaxError), all five class attributes and all methods
    shared a single mangled name (shadowing each other), and the
    `@mask_token.setter` decorator referenced a property that was never
    defined under that name. Names are restored to the identifiers the
    PreTrainedTokenizerFast base class dispatches to. Expects the upstream
    module-level constants (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
    PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, logger), which the tables above
    currently rebind to one name.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Rebuild the pre-tokenizer when the requested add_prefix_space
        # differs from the serialized one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """`<s> A </s>` or `<s> A </s></s> B </s>` for sequence pairs."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """RoBERTa does not use token type ids: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 666 | 0 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
# Checkpoint names and sample texts for the seq2seq dataset tests.
# NOTE(review): all eight constants are rebound to the SAME name
# `SCREAMING_SNAKE_CASE__`, so only the last one survives and the names the
# test class uses (BERT_BASE_CASED, PEGASUS_XSUM, ARTICLES, SUMMARIES,
# T5_TINY, BART_TINY, MBART_TINY, MARIAN_TINY) are undefined — the original
# distinct names need restoring.
SCREAMING_SNAKE_CASE__ : Dict = "bert-base-cased"
SCREAMING_SNAKE_CASE__ : str = "google/pegasus-xsum"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [" Sam ate lunch today.", "Sams lunch ingredients."]
SCREAMING_SNAKE_CASE__ : Dict = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "patrickvonplaten/t5-tiny-random"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "sshleifer/bart-tiny-random"
SCREAMING_SNAKE_CASE__ : Tuple = "sshleifer/tiny-mbart"
SCREAMING_SNAKE_CASE__ : List[Any] = "sshleifer/tiny-marian-en-de"
def __magic_name__ ( __lowerCAmelCase : Path , __lowerCAmelCase : list ) -> Union[str, Any]:
__lowerCamelCase = "\n".join(lowerCAmelCase_ )
Path(lowerCAmelCase_ ).open('''w''' ).writelines(lowerCAmelCase_ )
def __magic_name__(__lowerCAmelCase: str) -> str:
    """Populate a directory with {split}.source / {split}.target files for the
    train/val/test splits and return the directory path."""
    # NOTE(review): the body references `_dump_articles`, `lowerCAmelCase_`
    # and `tmp_dir`, none of which exist here — the helper above is also
    # named `__magic_name__` (this def shadows it) and the ARTICLES/SUMMARIES
    # constants were collapsed into one name. Needs restoring from the
    # upstream transformers seq2seq test utilities.
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(lowerCAmelCase_ , f'''{split}.source''' ) , lowerCAmelCase_ )
        _dump_articles(os.path.join(lowerCAmelCase_ , f'''{split}.target''' ) , lowerCAmelCase_ )
    return tmp_dir
class lowerCAmelCase__(TestCasePlus):
    """Tests for SeqaSeqDataset / LegacySeqaSeqDataset and the sortish samplers.

    NOTE(review): in the generated source every method was named ``__A`` (so
    later defs shadowed earlier ones), every local was assigned to the same
    throwaway name ``__lowerCamelCase`` while the bodies read never-defined
    names (``tokenizer``, ``tmp_dir``, ...), and the base class
    ``_UpperCamelCase`` did not exist.  This rewrite restores distinct method
    names, real local variables matching what the bodies already referenced,
    and the ``TestCasePlus`` base imported at the top of the file.
    """

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        """Sources/targets longer than the max lengths must be truncated."""
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        """LegacySeqaSeqDataset must trim sources to 20 and truncate targets."""
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        """pack_data_dir should merge short examples into one packed line."""
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        """Dynamic sampler must respect the token budget without dropping data."""
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f'''too many tokens in {len(failures)} batches''')

    def test_sortish_sampler_reduces_padding(self):
        """Sortish sampling should yield fewer pad tokens than naive order."""
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)
        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)
        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            # Total number of pad tokens produced over the whole epoch.
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        """Build a SeqaSeqDataset over the test (or real) wmt_en_ro data."""
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        """Two ranks of the distributed sampler must see disjoint indices."""
        ds, max_tokens, tokenizer = self._get_dataset()
        ids_rank0 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids_rank1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids_rank0.intersection(ids_rank1) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    def test_dataset_kwargs(self, tok_name):
        """dataset_kwargs must expose lang codes for mbart and prefix-space for bart."""
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 298 | from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case__(unittest.TestCase):
    """Integration check for the TF XLM-RoBERTa base model.

    NOTE(review): the generated source used the non-existent dtypes
    ``tf.intaa``/``tf.floataa`` (mangled ``int32``/``float32``), read the
    undefined name ``A__``, and the method name did not start with ``test_``
    so unittest never ran it.  All three are fixed here.
    """

    @slow
    def test_output_embeds_base_model(self):
        """Run a 6-token batch and compare a 3x3 slice of the last hidden state."""
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 666 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
# Module logger and tokenizer file names.  The generated source bound both to
# the same throwaway name; the tokenizer class below reads `logger` and
# `VOCAB_FILES_NAMES`, so those names are restored.
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# Hub URLs for the vocab/merges/tokenizer files of each pretrained RoBERTa
# checkpoint.  Renamed from the colliding `lowerCamelCase` so the tokenizer
# class attribute `pretrained_vocab_files_map` can reference it.
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
        '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
        '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
        '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
        '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
        '''roberta-large-openai-detector''': (
            '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
        ),
    },
    '''merges_file''': {
        '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
        '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
        '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
        '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
        '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
        '''roberta-large-openai-detector''': (
            '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
        '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
        '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
        '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
        '''roberta-base-openai-detector''': (
            '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
        ),
        '''roberta-large-openai-detector''': (
            '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
        ),
    },
}
# Maximum model input sizes (positions) per checkpoint; referenced by the
# tokenizer class attribute `max_model_input_sizes` below.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''roberta-base''': 512,
    '''roberta-large''': 512,
    '''roberta-large-mnli''': 512,
    '''distilroberta-base''': 512,
    '''roberta-base-openai-detector''': 512,
    '''roberta-large-openai-detector''': 512,
}
class lowerCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
_A : Dict = VOCAB_FILES_NAMES
_A : Any = PRETRAINED_VOCAB_FILES_MAP
_A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : int = ["input_ids", "attention_mask"]
_A : List[str] = RobertaTokenizer
def __init__( self : Optional[int] , __a : List[Any]=None , __a : Optional[int]=None , __a : List[str]=None , __a : Dict="replace" , __a : List[str]="<s>" , __a : Optional[Any]="</s>" , __a : List[str]="</s>" , __a : List[Any]="<s>" , __a : int="<unk>" , __a : int="<pad>" , __a : List[Any]="<mask>" , __a : Any=False , __a : Optional[int]=True , **__a : Union[str, Any] , ) -> int:
"""simple docstring"""
super().__init__(
A__ , A__ , tokenizer_file=A__ , errors=A__ , bos_token=A__ , eos_token=A__ , sep_token=A__ , cls_token=A__ , unk_token=A__ , pad_token=A__ , mask_token=A__ , add_prefix_space=A__ , trim_offsets=A__ , **A__ , )
__lowercase : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , A__ ) != add_prefix_space:
__lowercase : List[Any] = getattr(A__ , pre_tok_state.pop("""type""" ) )
__lowercase : Any = add_prefix_space
__lowercase : List[Any] = pre_tok_class(**A__ )
__lowercase : Optional[int] = add_prefix_space
__lowercase : List[str] = "post_processor"
__lowercase : Tuple = getattr(self.backend_tokenizer , A__ , A__ )
if tokenizer_component_instance:
__lowercase : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__lowercase : str = tuple(state["""sep"""] )
if "cls" in state:
__lowercase : Tuple = tuple(state["""cls"""] )
__lowercase : Tuple = False
if state.get("""add_prefix_space""" , A__ ) != add_prefix_space:
__lowercase : Optional[Any] = add_prefix_space
__lowercase : str = True
if state.get("""trim_offsets""" , A__ ) != trim_offsets:
__lowercase : Optional[int] = trim_offsets
__lowercase : List[Any] = True
if changes_to_apply:
__lowercase : int = getattr(A__ , state.pop("""type""" ) )
__lowercase : List[Any] = component_class(**A__ )
setattr(self.backend_tokenizer , A__ , A__ )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase ( self : Tuple , __a : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else value
__lowercase : Any = value
def lowerCAmelCase ( self : int , *__a : Optional[Any] , **__a : int ) -> BatchEncoding:
"""simple docstring"""
__lowercase : Optional[Any] = kwargs.get("""is_split_into_words""" , A__ )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A__ , **A__ )
def lowerCAmelCase ( self : Union[str, Any] , *__a : Any , **__a : List[Any] ) -> BatchEncoding:
"""simple docstring"""
__lowercase : Optional[int] = kwargs.get("""is_split_into_words""" , A__ )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A__ , **A__ )
def lowerCAmelCase ( self : Tuple , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
__lowercase : Optional[Any] = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Union[str, Any]=None ) -> Any:
"""simple docstring"""
__lowercase : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self : Dict , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase : str = [self.sep_token_id]
__lowercase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 149 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-level logger (name kept as generated; nothing below reads it).
UpperCAmelCase = logging.get_logger(__name__)
# PIL is only needed when vision extras are installed (the `preprocess`
# return annotation references PIL.Image.Image), so guard the import.
if is_vision_available():
    import PIL
class snake_case__(BaseImageProcessor):
    """CLIP-style image processor: optional RGB-convert, resize (shortest edge),
    center-crop, rescale and normalize, returning a BatchFeature.

    NOTE(review): the generated source declared every parameter as ``A__``
    (duplicate names are a SyntaxError) and named every method
    ``UpperCAmelCase__`` even though ``preprocess`` calls ``self.resize``,
    ``self.center_crop``, ``self.rescale`` and ``self.normalize``.  Parameter
    and method names are restored accordingly.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the configured pipeline to one or more images.

        Per-call arguments override the defaults stored on the instance.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 666 | 0 |
class FlowNetwork:
    """Directed capacity graph with (possibly multiple) sources and sinks.

    NOTE(review): the generated source named all four classes
    ``lowerCamelCase_`` and assigned every attribute to a throwaway local, so
    nothing worked; the ``__main__`` block below already references
    ``FlowNetwork`` / ``PushRelabelExecutor``, and those names are restored
    here together with the instance attributes the methods read.
    """

    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        """Collapse multiple sources/sinks into one super-source/super-sink."""
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]
        if len(sources) == 0 or len(sinks) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            # An upper bound on any feasible flow: total capacity out of all sources.
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])
            # Prepend a super-source row/column (index 0).
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            # Append a super-sink row/column (last index).
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        """Run the configured algorithm and return the maximum flow value."""
        if self.maximum_flow_algorithm is None:
            raise Exception("""You need to set maximum flow algorithm before.""")
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        """Instantiate *algorithm* (an executor class) over this network."""
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    """Base executor: caches network fields and runs `_algorithm` once."""

    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        # Implemented by subclasses.
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    """Executor whose result is a single maximum-flow value."""

    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("""You should execute algorithm before using its result!""")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    """Goldberg–Tarjan push–relabel with the relabel-to-front selection rule."""

    def __init__(self, flow_network):
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        """Discharge *vertex_index*: push along admissible edges, else relabel."""
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        """Push as much excess as the residual capacity allows."""
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        """Raise the vertex just above its lowest reachable neighbour."""
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    # Demo: single source (vertex 0), single sink (vertex 3).  The generated
    # source assigned everything to `UpperCAmelCase_` while the calls below
    # read `entrances`, `exits` and `graph`; the real names are restored.
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"""maximum flow is {maximum_flow}""")
| 17 | from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return every knight move from *position* that stays on an n x n board.

    Renamed from the colliding ``SCREAMING_SNAKE_CASE_`` — the tour helper
    below already calls ``get_valid_pos``.  The broken annotated tuple
    unpacking (``a, a : Dict = position``) is also fixed.
    """
    y, x = position
    # The eight L-shaped knight offsets.
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """True when every cell of *board* has been visited (no 0 entries left)."""
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking step of the open knight's tour.

    Tries every legal knight move from *pos*; marks a cell with the next move
    number, recurses, and unmarks on failure.  Returns True once the board is
    fully visited.
    """
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            # undo the move and try the next candidate
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board.

    Returns:
        A board whose cells hold the 1-based visit order.

    Raises:
        ValueError: when no tour exists for the given board size.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 666 | 0 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def _SCREAMING_SNAKE_CASE(vectors, noofclusters):
    """K-means clustering implemented with TensorFlow 1.x graph ops.

    Args:
        vectors: sequence of equal-length numeric vectors to cluster.
        noofclusters: number of clusters; must be smaller than ``len(vectors)``.

    Returns:
        ``(centroids, assignments)`` — the final centroid values and, for each
        input vector, the index of the cluster it was assigned to.

    NOTE(review): this uses the TF 1.x API (``tf.Session``, ``tf.sub``,
    ``tf.initialize_all_variables``) and will not run on TF 2.x without
    ``tf.compat.v1`` — confirm the pinned TensorFlow version.
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
| 633 | from ...configuration_utils import PretrainedConfig
class snake_case__ ( _UpperCamelCase ):
    """Configuration class for a BertGeneration-style model.

    Holds the hyperparameters read by the model; token ids are forwarded to the
    base configuration class. The previous version named every parameter ``A__``
    (a SyntaxError) and bound the values to throwaway locals instead of ``self``.
    """

    _SCREAMING_SNAKE_CASE : Union[str, Any] = "bert-generation"

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        # Token ids and any extra options are handled by the base config class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 666 | 0 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCAmelCase__ : Tuple =logging.get_logger(__name__)
class __lowercase (SequenceFeatureExtractor ):
    """Feature extractor that pads/chunks raw audio into model-ready arrays.

    Produces a batch with ``input_values`` (and ``padding_mask`` when padding is
    applied). The previous version repeated the parameter name ``lowerCAmelCase__``
    in ``__init__``/``__call__`` (a SyntaxError) and gave both properties the
    same name, so the second shadowed the first.
    """

    model_input_names = ["input_values", "padding_mask"]

    def __init__(self, feature_size=1, sampling_rate=2_4_0_0_0, padding_value=0.0, chunk_length_s=None, overlap=None, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        # Optional chunking configuration: chunk length in seconds and the
        # fractional overlap between consecutive chunks.
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self):
        """Chunk length in samples, or None when chunking is disabled."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        """Stride in samples between chunks, or None when chunking is disabled."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(self, raw_audio, padding=None, truncation=False, max_length=None, return_tensors=None, sampling_rate=None):
        """Convert raw audio (mono or stereo, single example or batch) to features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
                    F''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )

        if padding and truncation:
            raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))) )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' )

        padded_inputs = None
        input_values = BatchFeature({'input_values': raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                # Truncate every example to a whole number of chunk strides.
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                # Pad every example up to a whole number of chunk strides.
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop('attention_mask' )

        input_values = []
        for example in padded_inputs.pop('input_values' ):
            if self.feature_size == 1:
                # Add a trailing channel axis so mono audio is (length, 1).
                example = example[..., None]
            input_values.append(example.T )

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )

        return padded_inputs
| 101 | import math
def sieve(n):
    """Segmented sieve of Eratosthenes: return all primes <= n, in order.

    First sieves [2, sqrt(n)] classically, then sieves the rest of the range in
    segments of width sqrt(n) using the primes found so far. The previous
    version had a throwaway-name write where ``temp[j - low]`` must be cleared
    and conflated several distinct variables under one parameter name.
    """
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    # Classic sieve over the first segment [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # Sieve the remaining range in segments of width `end`.
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # Smallest multiple of `each` that is >= low.
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(1_0**6))
| 666 | 0 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
# The formatter class below reads these exact module-level names.
logger = get_logger()
# Lazily-built map from device string ids to `jaxlib.xla_extension.Device`s.
DEVICE_MAPPING = None
class A ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
    """Formatter that converts Arrow rows/columns/batches to JAX arrays.

    The previous version repeated the parameter name ``__lowerCAmelCase`` in
    ``__init__`` (a SyntaxError) and named every method ``_UpperCAmelCase``, so
    only the last definition survived; the base formatter dispatches to
    ``format_row``/``format_column``/``format_batch``.
    """

    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                F"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                """is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
                """the device with `str()` to get its string identifier that will be internally mapped """
                """to the actual `jaxlib.xla_extension.Device`.""" )
        self.device = device if isinstance(device, str) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                F"Device with string identifier {self.device} not listed among the available "
                F"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
                F"device: {str(jax.devices()[0] )}." )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        """Map each available device's string id to the device object."""
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype jax arrays into one array."""
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column, axis=0 )
        return column

    def _tensorize(self, value):
        """Convert a leaf value to a jax array placed on the configured device."""
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None)) ):
            return value
        elif isinstance(value, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image ):
                value = np.asarray(value )

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs} )

    def _recursive_tensorize(self, data_struct):
        """Recurse through nested lists/arrays, tensorizing the leaves."""
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct, """__array__""" ) and not isinstance(data_struct, jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct, (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False )

    def format_row(self, pa_table):
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )

    def format_column(self, pa_table):
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column

    def format_batch(self, pa_table):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
| 208 | import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Builds image-processor kwargs and expected output shapes for the tests.

    The previous version named every ``__init__`` parameter ``A__`` (a
    SyntaxError), bound values to throwaway locals instead of ``self``, and was
    defined under a name the test class never used (it instantiates
    ``ConditionalDetrImageProcessingTester``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # Default `size` mirrors the checkpoint config: shortest edge 18, longest 1333.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to build the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should resize the input(s) to."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            # Scale the shorter side to `shortest_edge`, keeping the aspect ratio.
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            # Batched outputs are padded to the largest height/width in the batch.
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Test suite for ConditionalDetrImageProcessor.

    The previous version named every method ``UpperCAmelCase__`` — each
    definition shadowed the previous one and unittest never discovered any of
    them (discovery requires ``test_*`` names) — and declared the class
    attribute under a name the methods never read (they read
    ``self.image_processing_class``).
    """

    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 13_33})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 3_97_69, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([3_97_69])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([8_00, 10_66])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([3_97_69])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([8_00, 10_66])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 666 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCamelCase = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
lowerCamelCase = {
'facebook/bart-base': 10_24,
'facebook/bart-large': 10_24,
'facebook/bart-large-mnli': 10_24,
'facebook/bart-large-cnn': 10_24,
'facebook/bart-large-xsum': 10_24,
'yjernite/bart_eli5': 10_24,
}
class A ( _UpperCamelCase ):
    """Fast (Rust/tokenizers-backed) BART tokenizer.

    Fixes over the previous version:
      * ``__init__`` parameters were declared as ``lowercase_`` while the body
        referenced the undefined name ``A__`` (NameError); the parameters now
        carry their real names and are used consistently.
      * every method was defined under the single name ``lowerCamelCase``, so
        later definitions silently clobbered earlier ones, and
        ``@mask_token.setter`` referenced a property that did not exist
        (NameError at class-creation time); the standard tokenizer API names
        are restored.
      * the class attributes all shared one name; they now use the attribute
        names the tokenizer machinery actually reads.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ) -> None:
        """Build the fast tokenizer and align the backend pre-tokenizer /
        post-processor with ``add_prefix_space`` and ``trim_offsets``."""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer if its add_prefix_space flag
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            # NOTE(review): the original passed the undefined name `A__` here;
            # upstream this looks the type up on `tokenizers.pre_tokenizers` —
            # confirm the module is imported at the top of the file.
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                # NOTE(review): `A__` was undefined here too; upstream this is
                # `tokenizers.processors` — confirm.
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """Return the mask token as a string, or None (with a logged error)
        if it has not been set yet."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value) -> None:
        """Set the mask token; a plain string is wrapped in an AddedToken that
        strips the space on its left so the mask absorbs the preceding space."""
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Reject pretokenized input unless add_prefix_space was enabled."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Reject pretokenized input unless add_prefix_space was enabled."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model files into *save_directory* and return the
        written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Wrap one or two sequences with BART's special tokens:
        ``<s> A </s>`` or ``<s> A </s> </s> B </s>``."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_a is None:
            return output

        return output + [self.eos_token_id] + token_ids_a_a + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """BART does not use token type ids: return a zero mask covering the
        sequence(s) plus their special tokens."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
# Module logger plus the set of model types that support extractive QA.
# The previous code bound all three values to the same name
# (`UpperCAmelCase`), clobbering the first two, even though later code reads
# `logger` and `MODEL_CONFIG_CLASSES`, which were otherwise undefined.
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
# Preserve the last binding the old code left behind, for any external reader.
UpperCAmelCase = MODEL_TYPES
@dataclass
class SquadDataTrainingArguments:
    """Command-line arguments controlling SQuAD data loading / preprocessing.

    The previous version declared every field under the same name
    (``_SCREAMING_SNAKE_CASE``), so all but the last were clobbered, and used
    the undefined name ``_UpperCamelCase`` as a default.  The field names
    below are the ones the dataset code actually reads (``args.model_type``,
    ``args.data_dir``, ``args.max_seq_length``, ``args.doc_stride``,
    ``args.max_query_length``, ``args.overwrite_cache``,
    ``args.version_2_with_negative``, ``args.lang_id``, ``args.threads``).
    """

    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "Model type selected in the list of supported question-answering model types."},
    )
    data_dir: Optional[str] = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    """Names of the dataset splits.

    The previous version assigned both values to the same class attribute,
    while later code looks up ``Split.train``, ``Split.dev`` and
    ``Split[mode]`` — names that did not exist.  ``Enum`` (imported above and
    otherwise unused) replaces the undefined base ``_UpperCamelCase``.
    """

    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    """PyTorch ``Dataset`` serving SQuAD features, with an on-disk feature cache.

    The previous version assigned every intermediate value to a throwaway
    local while the rest of the class read instance attributes
    (``self.features``, ``self.dataset``, ``self.examples``, ...) that were
    never set; it also declared all class-level annotations under one name
    and used the non-existent dtype ``torch.intaa``.  ``Dataset`` (imported
    above and otherwise unused) replaces the undefined base ``_UpperCamelCase``.
    """

    args: "SquadDataTrainingArguments"
    features: "List[SquadFeatures]"
    mode: "Split"
    is_language_sensitive: bool

    def __init__(
        self,
        args,
        tokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ) -> None:
        """Load features from the cache when present, otherwise convert the
        raw SQuAD examples and write the cache (under a file lock so only one
        distributed worker does the work)."""
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        # NOTE(review): both branches construct the same processor class — the
        # import above only exposes `SquadVaProcessor`, but upstream this
        # selects between the v2 and v1 SQuAD processors; confirm the import.
        self.processor = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode

        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes
        # the dataset, and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self) -> int:
        """Number of feature windows in the dataset."""
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        """Convert the i-th ``SquadFeatures`` into a dict of model-input tensors."""
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        # These model families take no token_type_ids.
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible})
        if self.is_language_sensitive:
            # was `torch.intaa` (non-existent); int64 matches the long ids above
            inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Builds image-processor kwargs and synthetic image inputs for the tests.

    Fixes over the previous version: ``__init__`` declared every parameter
    under the same name (a SyntaxError), both methods shared one name (the
    later def clobbered the earlier), the loop bodies referenced the
    undefined name ``A__``, random images used the non-existent dtype
    ``np.uinta``, and the class is renamed to the identifier the test
    classes below actually instantiate.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
    ):
        # Mutable defaults are created per instance (the original used a
        # shared-list default for mean/std).
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073]
        image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711]

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build a ChineseCLIP image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random test images as PIL images (default),
        numpy arrays (``numpify``) or torch tensors (``torchify``)."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        image_inputs = []
        if equal_resolution:
            for _ in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            for _ in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(image, 0, -1)) for image in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(image) for image in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(_UpperCamelCase, unittest.TestCase):
    """Tests for the 3-channel ChineseCLIP image processor.

    Fixes over the previous version: every method was defined under the same
    name ``lowerCAmelCase`` (only the last survived and unittest could not
    discover any test), the class attribute read elsewhere as
    ``self.image_processing_class`` was declared under another name, the
    bodies referenced the undefined name ``A__``, and both test classes in
    this module shared one class name and clobbered each other.
    """

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        # NOTE(review): the original passed the undefined `A__` for
        # do_center_crop; True matches the crop-size assertions below — confirm.
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(_UpperCamelCase, unittest.TestCase):
    """Tests that 4-channel (RGBA) inputs are converted to 3-channel output.

    Same fixes as the 3-channel test class above: real method names so
    unittest can discover the tests, the ``image_processing_class`` attribute
    the tests actually read, ``A__`` NameErrors replaced, and a distinct
    class name (the original shared its name with the previous test class
    and clobbered it).
    """

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        # NOTE(review): `A__` was undefined in the original call; True for
        # do_center_crop matches the assertions below — confirm.
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: RGBA in, RGB out.
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the hub config map.  The previous code bound both to the
# same name (`UpperCAmelCase`), clobbering the logger even though the config
# classes below call `logger.warning` / `logger.info`.
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
# Preserve the last binding the old code left behind, for any external reader.
UpperCAmelCase = GIT_PRETRAINED_CONFIG_ARCHIVE_MAP
class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a GIT model.

    Fixes over the previous version: every ``__init__`` parameter was named
    ``A__`` (duplicate parameter names are a SyntaxError); parameter names
    are recovered from the attribute assignments in the body.  The class is
    renamed to the identifier ``GitConfig.__init__`` below actually calls,
    and the undefined base ``_UpperCamelCase`` is replaced by
    ``PretrainedConfig`` (imported above and otherwise unused).
    """

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ) -> None:
        """Store the vision-transformer hyperparameters."""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this config from the hub or a local path; when the checkpoint
        is a composite GIT config, extract its nested vision config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration for a full GIT (GenerativeImage2Text) model.

    Fixes over the previous version: every ``__init__`` parameter was named
    ``A__`` (duplicate parameter names are a SyntaxError); parameter names
    are recovered from the attribute assignments in the body, and the
    undefined base ``_UpperCamelCase`` is replaced by ``PretrainedConfig``
    (imported above and otherwise unused).
    """

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ) -> None:
        """Store the text-decoder hyperparameters and the nested vision config."""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self) -> dict:
        """Serialize this config to a plain dict, nesting the vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module logger plus the image-classification model types.  The previous
# code bound all three values to the same name (`_snake_case`), clobbering
# the logger even though `main()` below calls `logger.setLevel` /
# `logger.warning`, and L5185 iterated a name that was never defined.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
# Preserve the last binding the old code left behind, for any external reader.
_snake_case = MODEL_TYPES
def pil_loader(path: str):
    """Open the image file at *path* and return it as an RGB PIL image.

    The previous version ignored its parameter and passed the undefined name
    ``lowerCAmelCase_`` to ``open``/``Image.open`` (NameError); it was also
    defined under a name (``snake_case``) that two later defs clobbered.
    """
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for
    training and eval.

    The previous version declared every field under the same name (``a_``),
    so all but the last were clobbered, and named the class ``_a`` even
    though ``main()`` passes ``DataTrainingArguments`` to HfArgumentParser.
    Field names are recovered from the ``data_args.*`` reads in ``main()``.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        """Validate that at least one data source was provided."""
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are
    going to fine-tune from.

    The previous version declared every field under the same name (``a_``)
    and named the class ``_a`` (clobbered by the previous dataclass) even
    though ``main()`` passes ``ModelArguments`` to HfArgumentParser.  Field
    names are recovered from the ``model_args.*`` reads in ``main()``.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list of supported model types."},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    """Stack a list of ``{'pixel_values', 'labels'}`` examples into batch tensors.

    The previous version named its parameter ``_a`` and never used it — the
    body iterated the undefined name ``examples`` (NameError).  It was also
    defined under ``snake_case``, a name the following ``main`` def clobbered.
    """
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def snake_case ( )-> str:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' , lowerCAmelCase_ , lowerCAmelCase_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase__ = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase_ )
transformers.utils.logging.set_verbosity(lowerCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowerCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
lowerCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCamelCase__ = {}
if data_args.train_dir is not None:
lowerCamelCase__ = os.path.join(data_args.train_dir , '**' )
if data_args.validation_dir is not None:
lowerCamelCase__ = os.path.join(data_args.validation_dir , '**' )
lowerCamelCase__ = load_dataset(
'imagefolder' , data_files=lowerCAmelCase_ , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
lowerCamelCase__ = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowerCAmelCase_ ) and data_args.train_val_split > 0.0:
lowerCamelCase__ = dataset["train"].train_test_split(data_args.train_val_split )
lowerCamelCase__ = split["train"]
lowerCamelCase__ = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowerCamelCase__ = dataset["train"].features["labels"].names
lowerCamelCase__ = {}, {}
for i, label in enumerate(lowerCAmelCase_ ):
lowerCamelCase__ = str(lowerCAmelCase_ )
lowerCamelCase__ = label
# Load the accuracy metric from the datasets package
lowerCamelCase__ = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_a: List[Any] ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
lowerCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowerCAmelCase_ ) , labelaid=lowerCAmelCase_ , idalabel=lowerCAmelCase_ , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase__ = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
lowerCamelCase__ = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowerCamelCase__ = image_processor.size["shortest_edge"]
else:
lowerCamelCase__ = (image_processor.size["height"], image_processor.size["width"])
lowerCamelCase__ = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowerCamelCase__ = Compose(
[
RandomResizedCrop(lowerCAmelCase_ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowerCamelCase__ = Compose(
[
Resize(lowerCAmelCase_ ),
CenterCrop(lowerCAmelCase_ ),
ToTensor(),
normalize,
] )
def train_transforms(_a: Optional[int] ):
lowerCamelCase__ = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(_a: Optional[Any] ):
lowerCamelCase__ = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
lowerCamelCase__ = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(lowerCAmelCase_ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
lowerCamelCase__ = (
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(lowerCAmelCase_ )
# Initalize our trainer
lowerCamelCase__ = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
# Training
if training_args.do_train:
lowerCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase__ = last_checkpoint
lowerCamelCase__ = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCamelCase__ = trainer.evaluate()
trainer.log_metrics('eval' , lowerCAmelCase_ )
trainer.save_metrics('eval' , lowerCAmelCase_ )
# Write model card and (optionally) push to hub
lowerCamelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase_ )
else:
trainer.create_model_card(**lowerCAmelCase_ )
if __name__ == "__main__":
main()
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Return the Jaro-Winkler similarity of ``str1`` and ``str2`` in [0, 1].

    The score is the Jaro similarity (the mean of the two match ratios and
    the transposition ratio) boosted by up to 4 characters of common prefix.
    Returns 0.0 when the strings share no matching characters.

    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        # Characters of _str1 that also occur in _str2 within the Jaro
        # matching window (half the shorter string's length).
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # Blank out the matched position so it cannot be matched twice.
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # Matching characters, computed from both directions.
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # Half the number of positions where the two match sequences disagree.
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # Winkler bonus: common prefix of up to 4 characters.
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
| 666 | 0 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """
    Warn about deprecated arguments/attributes and return their values.

    Each positional argument is a ``(attribute, version_name, message)`` tuple.
    If ``take_from`` is a dict (e.g. ``kwargs``), deprecated entries are popped
    from it and returned; if it is an object, the attribute is read from it.

    Raises:
        ValueError: if the current library version already reached ``version_name``
            (the deprecation should have been removed).
        TypeError: if ``take_from`` still contains unexpected keyword arguments.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # Allow a single tuple to be passed without wrapping it in another tuple.
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        # A deprecation whose removal version has been reached is a bug.
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    # Anything left in `take_from` was not an expected keyword argument.
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 665 | import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)

# Map sub-strings of suno/bark checkpoint keys onto the HF Bark naming scheme.
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# Location of each original Bark checkpoint on the Hugging Face Hub.
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
# Honour XDG_CACHE_HOME when set, otherwise fall back to ~/.cache.
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path for the requested Bark checkpoint file."""
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    """Download one checkpoint file from the Hub into the local cache directory."""
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """
    Load a suno/bark checkpoint and convert it into the matching HF Bark sub-model.

    Downloads the checkpoint when ``ckpt_path`` is missing, remaps parameter
    names via ``new_layer_name_dict``, and validates that no keys are missing
    or left over (aside from the non-persistent ``.attn.bias`` buffers).
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])

    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack: old checkpoints store a single vocab_size.
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config

    state_dict = checkpoint["model"]
    # fixup checkpoint: strip the torch.compile prefix and remap layer names.
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")

    # strict=False because the `.attn.bias` buffers are intentionally excluded.
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """
    Convert one Bark sub-model, verify it against the original suno
    implementation on random tokens, then save it to ``pytorch_dump_folder_path``.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model for a numerical comparison
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    """
    Assemble a full BarkModel from already-converted sub-model folders and
    save it under ``folder_path/append_text`` (pushing to ``hub_path``).
    """
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required positional parameters.
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 666 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build a LayoutLMv3 image processor in tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default to the processor's expected 18x18 output size.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase :Any = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case__ ( self):
UpperCAmelCase__ : Any = LayoutLMvaImageProcessingTester(self)
@property
def snake_case__ ( self):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(A__ , """do_resize"""))
self.assertTrue(hasattr(A__ , """size"""))
self.assertTrue(hasattr(A__ , """apply_ocr"""))
def snake_case__ ( self):
UpperCAmelCase__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18})
UpperCAmelCase__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42})
def snake_case__ ( self):
pass
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__)
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image)
# Test not batched input
UpperCAmelCase__ : Any = image_processing(image_inputs[0] , return_tensors="""pt""")
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , A__)
self.assertIsInstance(encoding.boxes , A__)
# Test batched
UpperCAmelCase__ : Optional[int] = image_processing(A__ , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__)
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray)
# Test not batched input
UpperCAmelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
UpperCAmelCase__ : Optional[int] = image_processing(A__ , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def snake_case__ ( self):
UpperCAmelCase__ : Dict = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__)
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor)
# Test not batched input
UpperCAmelCase__ : Any = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
UpperCAmelCase__ : Any = image_processing(A__ , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCAmelCase__ : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""")
UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["""file"""]).convert("""RGB""")
UpperCAmelCase__ : Tuple = image_processing(A__ , return_tensors="""pt""")
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224))
self.assertEqual(len(encoding.words) , len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCAmelCase__ : Optional[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
UpperCAmelCase__ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], 
[611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A__)
self.assertListEqual(encoding.boxes , A__)
# with apply_OCR = False
UpperCAmelCase__ : Dict = LayoutLMvaImageProcessor(apply_ocr=A__)
UpperCAmelCase__ : int = image_processing(A__ , return_tensors="""pt""")
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224)) | 407 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy import structure: map each submodule to the public names it exposes.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only exposed when torch is installed.
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
class lowerCAmelCase__ :
a__ : str
a__ : str = None
@staticmethod
def __A ( ) -> Optional[int]:
raise NotImplementedError
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Any ) -> Tuple:
raise NotImplementedError
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any:
raise NotImplementedError
def __A ( self : Optional[int] ) -> Union[str, Any]:
if not self.is_available():
raise RuntimeError(
f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def __A ( cls : Dict ) -> Any:
return f'''`pip install {cls.pip_package or cls.name}`'''
class lowerCAmelCase__ ( _UpperCamelCase ):
a__ : int = "optuna"
@staticmethod
def __A ( ) -> List[str]:
return is_optuna_available()
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
return run_hp_search_optuna(A__ , A__ , A__ , **A__ )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
return default_hp_space_optuna(A__ )
class lowerCAmelCase__ ( _UpperCamelCase ):
a__ : List[str] = "ray"
a__ : Dict = "'ray[tune]'"
@staticmethod
def __A ( ) -> Any:
return is_ray_available()
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : List[str] ) -> int:
return run_hp_search_ray(A__ , A__ , A__ , **A__ )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> str:
return default_hp_space_ray(A__ )
class SigOptBackend(HyperParamSearchBackendBase):
    """Hyperparameter search backed by SigOpt."""

    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    """Hyperparameter search backed by Weights & Biases sweeps."""

    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
# Registry mapping each HPSearchBackend enum member to its backend class.
# BUG FIX (de-obfuscation): default_hp_search_backend below references this
# name, which the mangled source never defined.
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
# Preserve the original (mangled) binding for any external reference.
SCREAMING_SNAKE_CASE__ = ALL_HYPERPARAMETER_SEARCH_BACKENDS
def default_hp_search_backend() -> str:
    """Return the name of the first installed hyperparameter-search backend.

    Logs an informational message when more than one backend is available.

    Raises:
        RuntimeError: when none of the supported backends is installed.
    """
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )


# Preserve the original (mangled) name for any external reference.
__magic_name__ = default_hp_search_backend
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase = logging.get_logger(__name__)
class snake_case__ ( _UpperCamelCase ):
    """Image processor: resize / center-crop / rescale / normalize a batch of images.

    BUG FIX (de-obfuscation): the original named every method identically
    (``UpperCAmelCase__``), so ``preprocess``'s calls to ``self.resize`` etc.
    raised AttributeError, and duplicated every parameter name, which is a
    SyntaxError. Real method and parameter names are restored from the bodies.
    """

    # Name of the tensor this processor produces.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals size["shortest_edge"], keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured transform pipeline and return a BatchFeature.

        Per-call arguments override the defaults stored on the processor.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model ``outputs.logits`` into per-image semantic segmentation maps.

        Args:
            outputs: model outputs carrying ``logits`` — assumed shape
                (batch, num_labels, height, width); TODO confirm against the model.
            target_sizes: optional per-image (height, width) to resize the maps to.
        """
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
# --- (removed unparseable table-markup residue from source extraction) ---
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Beam builder over the flat dummy examples (see get_test_dummy_examples).

    BUG FIX (de-obfuscation): the three methods all shared one mangled name
    (later defs shadowed earlier ones) and parameter names were duplicated;
    restored the builder hooks `_info`/`_split_generators`/`_build_pcollection`.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}), supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam builder over the nested dummy examples (see get_test_nested_examples).

    Same de-obfuscation fixes as DummyBeamDataset: restored the builder hooks
    `_info`/`_split_generators`/`_build_pcollection` and real parameter names.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}), supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Flat examples: (index, {"content": str}) triples for foo/bar/foobar."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    """Nested examples: (index, {"a": {"b": [str]}}) for foo/bar/foobar."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


# BUG FIX (de-obfuscation): both defs above shared the name ``snake_case_`` and
# the second silently shadowed the first, while the builders/tests reference
# the real names. Keep the original binding (which pointed at the second def).
snake_case_ = get_test_nested_examples
class BeamBuilderTest(TestCase):
    """End-to-end tests for the Beam-based dataset builders above.

    BUG FIXES (de-obfuscation): local variables ``builder``/``dset``/
    ``expected_num_examples`` were referenced but the assignments were mangled
    to a throwaway name; the sharded test also asserted shard 00000 twice
    instead of shards 00000 and 00001.
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two output shards while delegating to the real writer.
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # BUG FIX: the original checked shard 00000 twice; verify 00001 too.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # Without a beam_runner, preparation must fail loudly.
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the Blenderbot subpackage.
# BUG FIX (de-obfuscation): the original rebound a single mangled name for the
# base dict AND every conditional list, clobbering the dict each time, and
# then passed an undefined ``_import_structure`` to ``_LazyModule``. Restored
# the standard transformers lazy-module pattern.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; keep the original mangled name bound.
    UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = UpperCAmelCase
# --- (removed unparseable table-markup residue from source extraction) ---
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
class lowerCamelCase_ ( _UpperCamelCase ):
    """GLPN-style image processor: resize each image down to the nearest
    multiple of ``size_divisor`` and rescale pixel values to [0, 1].

    BUG FIX (de-obfuscation): all three methods shared one mangled name, so
    ``preprocess``'s calls to ``self.resize``/``self.rescale`` would raise
    AttributeError, and parameter names were duplicated (a SyntaxError).
    """

    # Name of the tensor this processor produces.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ):
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize to the largest (h, w) that are multiples of ``size_divisor``."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale``."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured pipeline and return a BatchFeature of pixel_values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
from ...configuration_utils import PretrainedConfig
# Map of canonical TAPAS checkpoint names to their hosted config URLs.
# NOTE(review): the mangled binding name ``UpperCAmelCase`` presumably stood
# for TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP — confirm against callers before renaming.
UpperCAmelCase = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}
class snake_case__ ( _UpperCamelCase ):
    """Configuration for TAPAS models (BERT backbone + table QA fine-tuning knobs).

    BUG FIX (de-obfuscation): every ``__init__`` parameter shared one mangled
    name (a SyntaxError); real names are restored from the body's assignments.
    The aggregation-label dict comprehension also cast the wrong value —
    it must be ``int(k)``, keying aggregation labels by integer index.
    """

    # Model-type identifier (was mangled to a meaningless attribute name).
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels, dict):
            # Keys may arrive as strings (e.g. from JSON); normalize to int.
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
# --- (removed unparseable table-markup residue from source extraction) ---
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that behaves like @property but caches the computed value
    on the instance (under ``__cached_<name>``) after the first access.

    BUG FIX (de-obfuscation): the original duplicated a parameter name in
    ``__get__`` (a SyntaxError) and referenced undefined mangled names; the
    body is restored from its surviving statements.
    """

    def __get__(self, obj, objtype=None):
        # Accessing on the class returns the descriptor itself.
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false).

    BUG FIX: the original lower-cased ``val`` into a throwaway local and then
    tested the *original* value, so "YES"/"True"/"ON" etc. raised ValueError.

    Raises:
        ValueError: when ``val`` is not a recognized truth string.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """Return True when ``x`` is a torch/tf/jax tensor, a torch.fx proxy, or a numpy array.

    Framework imports are done lazily and only when the framework is installed.
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)
def _SCREAMING_SNAKE_CASE ( _lowercase : Dict ) ->Tuple:
'''simple docstring'''
return isinstance(lowerCAmelCase_ , np.ndarray )
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
return _is_numpy(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->int:
'''simple docstring'''
import torch
return isinstance(lowerCAmelCase_ , torch.Tensor )
def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple ) ->Optional[int]:
'''simple docstring'''
return False if not is_torch_available() else _is_torch(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] ) ->Any:
'''simple docstring'''
import torch
return isinstance(lowerCAmelCase_ , torch.device )
def _SCREAMING_SNAKE_CASE ( _lowercase : Any ) ->str:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] ) ->List[Any]:
'''simple docstring'''
import torch
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
if hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
a : List[str] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
else:
return False
return isinstance(lowerCAmelCase_ , torch.dtype )
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(lowerCAmelCase_ )
def _is_tensorflow(x):
    """True when ``x`` is a tf.Tensor (tensorflow assumed importable here)."""
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """TF-safe wrapper: False when tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)
def _is_tf_symbolic_tensor(x):
    """True when ``x`` is a symbolic TF tensor (tensorflow assumed importable)."""
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """TF-safe wrapper: False when tensorflow is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)
def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->Union[str, Any]:
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCAmelCase_ , jnp.ndarray )
def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->Dict:
'''simple docstring'''
return False if not is_flax_available() else _is_jax(lowerCAmelCase_ )
def to_py_obj(obj):
    """Recursively convert tensors/arrays (nested in dicts, lists, tuples) to
    plain Python objects (lists/numbers); anything unrecognized passes through."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy(obj):
    """Recursively convert tensors (nested in dicts, lists, tuples) to numpy
    arrays; anything unrecognized passes through."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """Base class for model outputs: an OrderedDict whose entries are also
    accessible as attributes, built from dataclass fields.

    BUG FIX (de-obfuscation): four methods shared one mangled name (shadowing
    each other) and several ``self[...]`` writes in ``__post_init__`` had been
    mangled into assignments to throwaway locals; restored from the surviving
    control flow.
    """

    def __post_init__(self):
        class_fields = fields(self)
        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            # Integer / slice access indexes the tuple view.
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Return a tuple of all non-None values, in key order."""
        return tuple(self[k] for k in self.keys())
class __UpperCamelCase ( _UpperCamelCase ):
    """Enum helper whose failed lookup reports the valid member values.

    Fixes: (1) the class listed ``_UpperCamelCase`` as a base twice, which
    raises ``TypeError: duplicate base class`` at class-creation time;
    (2) the method parameter was a placeholder while the body read the
    undefined name ``value``; (3) ``_valueamember_map_`` is a typo for the
    real Enum attribute ``_value2member_map_``.
    """

    @classmethod
    def __a(cls, value) -> Tuple:
        # Mirrors Enum._missing_: tell the caller which values are accepted.
        raise ValueError(
            f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}""" )
class __UpperCamelCase ( _UpperCamelCase ):
    """Padding strategies accepted by tokenization utilities.

    Fix: all three members were bound to the single name ``lowerCamelCase``,
    so the first two assignments were silently overwritten; restore distinct
    member names matching the string values.
    """

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"
class __UpperCamelCase ( _UpperCamelCase ):
    """Supported tensor framework identifiers.

    Fix: all four members were bound to the single name ``lowerCamelCase``,
    so only the last assignment survived; restore distinct member names
    matching the framework short codes.
    """

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class __UpperCamelCase :
    """Enter a list of context managers as one unit, unwinding in reverse order.

    Fixes: ``__init__`` discarded its argument into a throwaway local instead
    of setting ``self.context_managers`` / ``self.stack``; ``__enter__`` and
    ``__exit__`` referenced the undefined name ``A__``; ``__exit__`` declared
    two parameters with the same name (a SyntaxError).
    """

    def __init__(self, context_managers) -> None:
        # The ExitStack owns the lifetime of every entered manager.
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self) -> None:
        # Enter managers in order; ExitStack exits them LIFO on __exit__.
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs) -> None:
        self.stack.__exit__(*args, **kwargs)
def _SCREAMING_SNAKE_CASE ( model_class ) -> bool:
    '''Return True if the model's call signature defaults ``return_loss`` to True.

    Fix: the parameter was renamed to a placeholder while the body read the
    undefined names ``lowerCAmelCase_`` / ``model_class``.
    NOTE(review): ``infer_framework`` must exist at module level — confirm its
    actual name; every helper in this file was renamed to the same placeholder.
    '''
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def _SCREAMING_SNAKE_CASE ( model_class ) -> Optional[Any]:
    '''Return the label-argument names accepted by the model's call signature.

    Fix: the parameter was renamed to a placeholder while the body read the
    undefined names ``model_name`` / ``framework`` / ``signature``.
    NOTE(review): ``infer_framework`` must exist at module level — confirm its
    actual name; every helper in this file was renamed to the same placeholder.
    '''
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        # QA heads take span positions instead of a single `labels` argument.
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def _SCREAMING_SNAKE_CASE ( d: MutableMapping , parent_key: str = "" , delimiter: str = "." ) -> Optional[Any]:
    '''Flatten a nested mapping into one level, joining keys with ``delimiter``.

    Fixes: the three parameters all shared the placeholder name ``_lowercase``
    (a SyntaxError); the body compared ``isinstance(lowerCAmelCase_, lowerCAmelCase_)``
    and recursed through an undefined name. The recursion now goes through the
    inner generator, so the helper is self-contained.
    '''

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            # Prefix nested keys with their parent path (top-level keys kept as-is).
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from _flatten_dict(v, key, delimiter)
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
@contextmanager
def _SCREAMING_SNAKE_CASE ( working_dir , use_temp_dir: bool = False ) -> List[str]:
    '''Yield the given working directory, or a fresh temporary one that is
    removed on exit when ``use_temp_dir`` is True.

    Fix: both parameters shared the placeholder name ``_lowercase``
    (a SyntaxError) while the body read ``use_temp_dir`` / ``working_dir``.
    '''
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def _SCREAMING_SNAKE_CASE ( array , axes=None ) -> Any:
    '''Framework-agnostic transpose for numpy / torch / tensorflow / jax arrays.

    Fix: both parameters shared the placeholder name ``_lowercase``
    (a SyntaxError) while the body read ``array`` / ``axes``.
    '''
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        # torch has no axes= transpose; .T for a full reverse, permute otherwise.
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(F"""Type not supported for transpose: {type(array)}.""" )
def _SCREAMING_SNAKE_CASE ( array , newshape ) -> Optional[Any]:
    '''Framework-agnostic reshape for numpy / torch / tensorflow / jax arrays.

    Fix: both parameters shared the placeholder name ``_lowercase``
    (a SyntaxError) while the body read ``array`` / the new shape.
    '''
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        # torch's reshape takes the dimensions unpacked.
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(F"""Type not supported for reshape: {type(array)}.""" )
def _SCREAMING_SNAKE_CASE ( array , axis=None ) -> str:
    '''Framework-agnostic squeeze for numpy / torch / tensorflow / jax arrays.

    Fix: both parameters shared the placeholder name ``_lowercase``
    (a SyntaxError) while the body read ``array`` / ``axis``.
    '''
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        # torch uses dim= and rejects dim=None, hence the branch.
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(F"""Type not supported for squeeze: {type(array)}.""" )
def _SCREAMING_SNAKE_CASE ( array , axis ) -> Any:
    '''Framework-agnostic expand_dims for numpy / torch / tensorflow / jax arrays.

    Fix: both parameters shared the placeholder name ``_lowercase``
    (a SyntaxError) while the body read ``array`` / ``axis``.
    '''
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(F"""Type not supported for expand_dims: {type(array)}.""" )
def _SCREAMING_SNAKE_CASE ( array ) -> Union[str, Any]:
    '''Return the total element count of a numpy / torch / tensorflow / jax array.

    Fixes: the parameter was renamed to a placeholder while the body read
    ``array``; the error message named the wrong operation ("expand_dims",
    a copy-paste slip) — it now names this one.
    '''
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(F"""Type not supported for tensor_size: {type(array)}.""" )
def _SCREAMING_SNAKE_CASE ( auto_map , repo_id ) -> Union[str, Any]:
    '''Prefix every auto-map entry with ``repo_id`` (``"repo--module.Class"``),
    skipping entries that are None or already prefixed.

    Fix: both parameters shared the placeholder name ``_lowercase``
    (a SyntaxError) while the body read ``auto_map`` / ``repo_id``, and the
    rewritten values were bound to a throwaway local instead of the map.
    '''
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            # Lists/tuples hold per-framework entries; rewrite each element.
            auto_map[key] = [F"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F"""{repo_id}--{value}"""
    return auto_map
def _SCREAMING_SNAKE_CASE ( model_class ) -> Union[str, Any]:
    '''Infer the framework ("tf", "pt" or "flax") from a model class's MRO.

    Fix: the parameter was renamed to a placeholder while the body read the
    undefined names ``module`` / ``name`` / ``model_class``.
    '''
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        # No framework base found anywhere in the MRO.
        raise TypeError(F"""Could not infer framework from class {model_class}.""" )
| 633 | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class snake_case__ ( datasets.BeamBasedBuilder ):
    # Dummy Apache Beam dataset builder producing flat {"content": str} examples.
    # NOTE(review): all three methods share the mangled name ``UpperCAmelCase__``,
    # so only the last binding survives on the class; the originals were
    # presumably ``_info`` / ``_split_generators`` / ``_build_pcollection`` —
    # confirm against the datasets builder API. ``A__`` is undefined where
    # referenced, and declaring two parameters named ``A__`` is a SyntaxError.
    def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
        '''Describe the dataset: a single string feature named "content".'''
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=A__ , )
    def UpperCAmelCase__ ( self : Optional[Any] , A__ : str , A__ : str ) -> Optional[int]:
        '''Declare one TRAIN split fed by the dummy example pairs.'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
    def UpperCAmelCase__ ( self : int , A__ : Optional[int] , A__ : Dict ) -> Optional[Any]:
        '''Materialize the examples as a Beam PCollection.'''
        import apache_beam as beam
        # NOTE(review): ``pipeline`` is undefined here — it was presumably a parameter.
        return pipeline | "Load Examples" >> beam.Create(A__ )
class snake_case__ ( datasets.BeamBasedBuilder ):
    # Dummy Apache Beam dataset builder producing nested {"a": {"b": [str]}} examples.
    # NOTE(review): same mangling issues as the flat builder above — shared
    # method name, undefined ``A__``, duplicate ``A__`` parameters (SyntaxError),
    # and an undefined ``pipeline`` in the PCollection builder.
    def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
        '''Describe the dataset: a sequence feature "a" of records {"b": str}.'''
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=A__ , )
    def UpperCAmelCase__ ( self : Any , A__ : List[str] , A__ : str ) -> Optional[int]:
        '''Declare one TRAIN split fed by the nested example pairs.'''
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]
    def UpperCAmelCase__ ( self : List[Any] , A__ : List[str] , A__ : Optional[int] ) -> List[str]:
        '''Materialize the examples as a Beam PCollection.'''
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(A__ )
def SCREAMING_SNAKE_CASE_ ( ):
    """Return the flat (key, {"content": str}) pairs used by the dummy builder."""
    pairs = []
    for index, content in enumerate(["foo", "bar", "foobar"]):
        pairs.append((index, {"content": content}))
    return pairs
def SCREAMING_SNAKE_CASE_ ( ):
    """Return the nested (key, {"a": {"b": [str]}}) pairs used by the nested builder."""
    pairs = []
    for index, content in enumerate(["foo", "bar", "foobar"]):
        pairs.append((index, {"a": {"b": [content]}}))
    return pairs
class snake_case__ ( _UpperCamelCase ):
    # End-to-end tests for the Beam-based builders above: build with the
    # DirectRunner, then check the produced Arrow files and dataset contents.
    # NOTE(review): this class is heavily mangled — every test method shares the
    # name ``UpperCAmelCase__`` (only the last binding survives), results are
    # bound to throwaway ``snake_case_`` locals while the bodies read the real
    # names (``builder``, ``dset``, ``expected_num_examples``, ``tmp_cache_dir``),
    # and ``A__`` is undefined everywhere it appears. Code left untouched; only
    # documentation added.
    @require_beam
    def UpperCAmelCase__ ( self : str ) -> List[str]:
        '''Build the flat dummy dataset and verify rows, splits and side files.'''
        snake_case_ : Union[str, Any] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            snake_case_ : Dict = DummyBeamDataset(cache_dir=A__ , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            snake_case_ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , A__ )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , A__ )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
    @require_beam
    def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
        '''Build with WriteToParquet patched to 2 shards and verify sharded output.'''
        import apache_beam as beam
        snake_case_ : Tuple = beam.io.parquetio.WriteToParquet
        snake_case_ : Union[str, Any] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            snake_case_ : List[Any] = DummyBeamDataset(cache_dir=A__ , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                snake_case_ : int = partial(A__ , num_shards=2 )
                builder.download_and_prepare()
                # NOTE(review): both existence checks below test shard 00000 —
                # the second was presumably meant to check shard 00001.
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
                self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
                snake_case_ : Optional[Any] = builder.as_dataset()
                self.assertEqual(dset["train"].num_rows , A__ )
                self.assertEqual(dset["train"].info.splits["train"].num_examples , A__ )
                # Order is not preserved when sharding, so we just check that all the elements are there
                self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
                self.assertTrue(
                    os.path.exists(os.path.join(A__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
                del dset
    @require_beam
    def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
        '''Building without a beam_runner must raise MissingBeamOptions.'''
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            snake_case_ : Tuple = DummyBeamDataset(cache_dir=A__ )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
    @require_beam
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
        '''Build the nested dataset and verify rows, splits and side files.'''
        snake_case_ : Optional[int] = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            snake_case_ : List[str] = NestedBeamDataset(cache_dir=A__ , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            snake_case_ : int = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , A__ )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , A__ )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
| 666 | 0 |
import torch
from transformers import AutoModel
class __lowercase (torch.nn.Module ):
    """Few-shot NER span scorer built on a BERT encoder.

    NOTE(review): heavily mangled — ``A__`` is undefined everywhere it appears,
    results are bound to throwaway ``SCREAMING_SNAKE_CASE_`` locals while the
    bodies read the real names (``self.bert``, ``self.cos``, ``self.softmax``,
    ``support_sizes``, ``q``, ``S``, ``p_start``/``p_end``), all four helper
    methods share the name ``UpperCamelCase__`` (only the last survives), and
    one signature declares three parameters named ``lowerCAmelCase__`` (a
    SyntaxError). Code left untouched; only documentation added.
    """

    def __init__( self , lowerCAmelCase__="sayef/fsner-bert-base-uncased" ):
        """Load the pretrained encoder plus cosine-similarity and softmax heads."""
        super(A__ , self ).__init__()
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoModel.from_pretrained(A__ , return_dict=A__ )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.nn.CosineSimilarity(3 , 1E-08 )
        SCREAMING_SNAKE_CASE_ : List[str] = torch.nn.Softmax(dim=1 )
    def UpperCamelCase__ ( self , **lowerCAmelCase__ ):
        """Encode the inputs and return the last hidden states."""
        return self.bert(**A__ ).last_hidden_state
    def UpperCamelCase__ ( self , lowerCAmelCase__ ):
        """Sum token embeddings along the vector axis (keeping dims)."""
        return token_embeddings.sum(2 , keepdim=A__ )
    def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=1 ):
        """Temperature-scaled softmax over cosine similarities."""
        return self.softmax(T * self.cos(A__ , A__ ) )
    def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
        """Score start/end token probabilities of the query against each support set."""
        SCREAMING_SNAKE_CASE_ : Any = W_supports["sizes"].tolist()
        SCREAMING_SNAKE_CASE_ : Dict = W_supports["start_token_id"].item()
        SCREAMING_SNAKE_CASE_ : List[Any] = W_supports["end_token_id"].item()
        # The bookkeeping keys must not be fed to the encoder below.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        SCREAMING_SNAKE_CASE_ : List[Any] = self.BERT(**A__ )
        SCREAMING_SNAKE_CASE_ : List[str] = self.BERT(**A__ )
        SCREAMING_SNAKE_CASE_ : Any = None
        SCREAMING_SNAKE_CASE_ : List[str] = None
        SCREAMING_SNAKE_CASE_ : Dict = W_supports["input_ids"] == start_token_id
        SCREAMING_SNAKE_CASE_ : Any = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(A__ ):
            # Slice this support set's rows out of the concatenated batch.
            if i == 0:
                SCREAMING_SNAKE_CASE_ : List[Any] = 0
            else:
                SCREAMING_SNAKE_CASE_ : int = support_sizes[i - 1]
            SCREAMING_SNAKE_CASE_ : Dict = S[s : s + size][start_token_masks[s : s + size]]
            SCREAMING_SNAKE_CASE_ : str = S[s : s + size][end_token_masks[s : s + size]]
            SCREAMING_SNAKE_CASE_ : Optional[int] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            SCREAMING_SNAKE_CASE_ : Tuple = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                # Stack per-support probabilities row by row.
                SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.vstack((p_starts, p_start) )
                SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.vstack((p_ends, p_end) )
            else:
                SCREAMING_SNAKE_CASE_ : Optional[int] = p_start
                SCREAMING_SNAKE_CASE_ : int = p_end
        return p_starts, p_ends
| 101 | import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def SCREAMING_SNAKE_CASE_ ( monkeypatch ):
    """Reset the deprecation-warning dedup cache so every test sees warnings afresh.

    Fix: the parameter was renamed to a placeholder while the body used the
    pytest ``monkeypatch`` fixture, whose name must match the parameter.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def SCREAMING_SNAKE_CASE_ ( monkeypatch ):
    """Patch ``datasets.inspect``'s huggingface_hub with a mock exposing list_metrics().

    Fixes: the fixture parameter was a placeholder while the body used
    ``monkeypatch``; both inner classes shared the name ``snake_case__`` even
    though the second references ``MetricMock``; the comprehension passed the
    undefined ``_UpperCamelCase`` instead of the loop variable; and the method
    that the mocked hub must expose was renamed away from ``list_metrics``.
    """

    class MetricMock:
        def __init__(self, metric_id):
            # Only the id is needed by the inspect helpers under test.
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
# Each metric entry point must emit the deprecation warning pointing at
# `evaluate`. NOTE(review): the five parameters all share the placeholder name
# ``lowerCAmelCase_`` (a duplicate-argument SyntaxError) while the body reads
# ``args`` / ``tmp_path`` / ``func``; the original fixture parameter names
# cannot be recovered from this view — confirm against the datasets test suite.
@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: Tuple , lowerCAmelCase_: int , lowerCAmelCase_: List[Any] , lowerCAmelCase_: Any , lowerCAmelCase_: List[str] ):
    if "tmp_path" in args:
        # Substitute the "tmp_path" placeholder with the real tmp_path fixture value.
        snake_case_ : List[Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(lowerCAmelCase_ , match="https://huggingface.co/docs/evaluate" ):
        func(*lowerCAmelCase_ )
| 666 | 0 |
'''Lazy import structure for the Table Transformer model, following the
standard ``transformers`` lazy-module pattern.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> public names it provides.
UpperCamelCase ={
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: this list was rebound to the name ``UpperCamelCase`` itself,
    # destroying the import-structure dict built above; register it under
    # its submodule key instead.
    UpperCamelCase["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )
else:
    import sys

    # Fix: the lazy module must replace this module in ``sys.modules`` (the
    # original assigned it to a plain name) and must receive the dict built
    # above (the original passed the undefined ``_import_structure``).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], UpperCamelCase, module_spec=__spec__)
| 208 | from __future__ import annotations
import bisect
def SCREAMING_SNAKE_CASE_ ( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    """Return the leftmost index at which ``item`` can be inserted into
    ``sorted_collection`` while keeping it sorted (bisect_left).

    A negative ``hi`` means "search to the end of the list".

    Fix: the four parameters all shared the placeholder name
    ``lowerCAmelCase_`` (a SyntaxError) while the body read
    ``sorted_collection`` / ``item`` / ``lo`` / ``hi``.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2  # overflow-safe midpoint
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def SCREAMING_SNAKE_CASE_ ( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    """Return the rightmost index at which ``item`` can be inserted into
    ``sorted_collection`` while keeping it sorted (bisect_right).

    A negative ``hi`` means "search to the end of the list".

    Fix: the four parameters all shared the placeholder name
    ``lowerCAmelCase_`` (a SyntaxError) while the body read
    ``sorted_collection`` / ``item`` / ``lo`` / ``hi``.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2  # overflow-safe midpoint
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def SCREAMING_SNAKE_CASE_ ( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    """Insert ``item`` in sorted order at the leftmost valid position.

    Fix: the four parameters all shared the placeholder name ``lowerCAmelCase_``
    (a SyntaxError). NOTE(review): ``bisect_left`` must resolve to the
    module-level helper above — every function in this file was renamed to
    the same placeholder, so confirm the actual callable name.
    """
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def SCREAMING_SNAKE_CASE_ ( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    """Insert ``item`` in sorted order at the rightmost valid position.

    Fix: the four parameters all shared the placeholder name ``lowerCAmelCase_``
    (a SyntaxError). NOTE(review): ``bisect_right`` must resolve to the
    module-level helper above — confirm the actual callable name.
    """
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def SCREAMING_SNAKE_CASE_ ( sorted_collection: list[int] , item: int ) -> int | None:
    """Iterative binary search: return the index of ``item`` or None.

    Fix: duplicate placeholder parameter names (SyntaxError), and the bounds
    were bound to throwaway ``snake_case_`` locals while the loop read the
    undefined names ``left`` / ``right`` / ``midpoint`` / ``current_item``.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2  # overflow-safe midpoint
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def SCREAMING_SNAKE_CASE_ ( sorted_collection: list[int] , item: int ) -> int | None:
    """Binary search using the stdlib ``bisect`` module: index of ``item`` or None.

    Fix: both parameters shared the placeholder name ``lowerCAmelCase_``
    (a SyntaxError) and the result was bound to a throwaway local while the
    body read the undefined name ``index``.
    """
    index = bisect.bisect_left(sorted_collection, item)
    # bisect_left gives the insertion point; it is a hit only if the element
    # at that position actually equals the item.
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def SCREAMING_SNAKE_CASE_ ( sorted_collection: list[int] , item: int , left: int , right: int ) -> int | None:
    """Recursive binary search over ``sorted_collection[left:right + 1]``.

    Fixes: the four parameters all shared the placeholder name
    ``lowerCAmelCase_`` (a SyntaxError), and the recursive calls targeted the
    undefined name ``binary_search_by_recursion`` — recurse through this
    function itself so the helper is self-contained.
    """
    if right < left:
        return None
    midpoint = left + (right - left) // 2  # overflow-safe midpoint
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return SCREAMING_SNAKE_CASE_(sorted_collection, item, left, midpoint - 1)
    else:
        return SCREAMING_SNAKE_CASE_(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    # Demo: read a comma-separated list, sort it, and search for a target.
    # Fix: every variable here was named ``UpperCAmelCase``, so each assignment
    # clobbered the previous one while later lines read the undefined names
    # ``user_input`` / ``collection`` / ``target`` / ``result``.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    # NOTE(review): ``binary_search`` must resolve to the iterative helper
    # above — confirm its actual name, every function here was renamed to the
    # same placeholder.
    result = binary_search(collection, target)
    if result is None:
        print(F"{target} was not found in {collection}.")
    else:
        print(F"{target} was found at position {result} in {collection}.")
| 666 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A ( _UpperCamelCase , _UpperCamelCase ):
    # Variance-preserving SDE scheduler (score_sde_vp-style Euler-Maruyama step).
    # NOTE(review): heavily mangled — listing ``_UpperCamelCase`` twice raises
    # ``TypeError: duplicate base class`` at class creation; every method's
    # parameters share one placeholder name (a SyntaxError); results are bound
    # to throwaway ``_lowerCamelCase`` locals while the bodies read the real
    # names (``self.timesteps``, ``t``, ``x``, ``score``, ``std``, ``beta_t``,
    # ``drift``, ``diffusion``, ``x_mean``); and ``A__`` is undefined. Code left
    # untouched; only documentation added.

    UpperCamelCase__ : Optional[int] =1

    @register_to_config
    def __init__( self : Any , lowercase_ : Any=2000 , lowercase_ : str=0.1 , lowercase_ : List[Any]=20 , lowercase_ : int=1E-3 ) -> List[Any]:
        """Initialize empty schedule state; call set_timesteps before stepping."""
        _lowerCamelCase : Optional[int] =None
        _lowerCamelCase : List[Any] =None
        _lowerCamelCase : int =None
    def lowerCamelCase ( self : Tuple , lowercase_ : List[Any] , lowercase_ : Union[str, torch.device] = None ) -> str:
        """Build the descending timestep grid from 1 down to sampling_eps."""
        _lowerCamelCase : str =torch.linspace(1 , self.config.sampling_eps , A__ , device=A__ )
    def lowerCamelCase ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : int=None ) -> List[Any]:
        """One reverse-SDE Euler-Maruyama step; returns (x, x_mean)."""
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        _lowerCamelCase : Optional[Any] =(
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        _lowerCamelCase : Union[str, Any] =torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        _lowerCamelCase : Optional[Any] =std.flatten()
        # Broadcast the per-timestep std across the score's trailing dims.
        while len(std.shape ) < len(score.shape ):
            _lowerCamelCase : Tuple =std.unsqueeze(-1 )
        _lowerCamelCase : List[Any] =-score / std
        # compute
        _lowerCamelCase : List[Any] =-1.0 / len(self.timesteps )
        _lowerCamelCase : Dict =self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        _lowerCamelCase : Union[str, Any] =beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            _lowerCamelCase : Any =beta_t.unsqueeze(-1 )
        _lowerCamelCase : Any =-0.5 * beta_t * x
        _lowerCamelCase : Union[str, Any] =torch.sqrt(A__ )
        _lowerCamelCase : str =drift - diffusion**2 * score
        _lowerCamelCase : Union[str, Any] =x + drift * dt
        # add noise
        _lowerCamelCase : int =randn_tensor(x.shape , layout=x.layout , generator=A__ , device=x.device , dtype=x.dtype )
        _lowerCamelCase : List[Any] =x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
    def __len__( self : Optional[Any] ) -> Optional[Any]:
        """Number of training timesteps configured for this scheduler."""
        return self.config.num_train_timesteps
| 464 | import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case__ ( _UpperCamelCase ):
    # Unconditional latent-diffusion pipeline: DDIM denoising in VQ-VAE latent
    # space, then decoding to images.
    # NOTE(review): mangled — ``__init__`` declares three parameters named
    # ``A__`` (a SyntaxError) and ``A__`` is undefined where referenced inside
    # ``__call__``. Code left untouched; only documentation added.
    def __init__( self : Union[str, Any] , A__ : VQModel , A__ : UNetaDModel , A__ : DDIMScheduler ) -> List[Any]:
        """Register the VQ-VAE, UNet and scheduler sub-modules."""
        super().__init__()
        self.register_modules(vqvae=A__ , unet=A__ , scheduler=A__ )
    @torch.no_grad()
    def __call__( self : str , A__ : int = 1 , A__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A__ : float = 0.0 , A__ : int = 50 , A__ : Optional[str] = "pil" , A__ : bool = True , **A__ : Optional[Any] , ) -> Union[Tuple, ImagePipelineOutput]:
        """Sample latents, run the DDIM loop, decode with the VQ-VAE and return images."""
        snake_case_ : Optional[int] = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=A__ , )
        snake_case_ : List[Any] = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        snake_case_ : Any = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(A__ )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        snake_case_ : Union[str, Any] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        snake_case_ : List[Any] = {}
        if accepts_eta:
            snake_case_ : int = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            snake_case_ : Union[str, Any] = self.scheduler.scale_model_input(A__ , A__ )
            # predict the noise residual
            snake_case_ : Dict = self.unet(A__ , A__ ).sample
            # compute the previous noisy sample x_t -> x_t-1
            snake_case_ : Union[str, Any] = self.scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
        # decode the image latents with the VAE
        snake_case_ : int = self.vqvae.decode(A__ ).sample
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL output.
        snake_case_ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
        snake_case_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            snake_case_ : Optional[int] = self.numpy_to_pil(A__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=A__ )
| 666 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def __UpperCamelCase ( u , p ) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the "u-calculus" factor of
    Newton's forward-difference interpolation formula.

    Fixes: both parameters shared the name ``SCREAMING_SNAKE_CASE`` (a
    SyntaxError); the loop bound referenced the undefined ``lowerCAmelCase_``;
    and the accumulator was bound to ``__snake_case`` while the body read
    ``temp``. The return annotation (``List[str]``) was also wrong for the
    numeric result.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def __UpperCamelCase ( ) -> int:
    """Interactive Newton forward-difference interpolation driver.

    NOTE(review): heavily mangled — this definition shadows the ``ucal``
    helper above (both are named ``__UpperCamelCase``); every intermediate is
    bound to the throwaway ``__snake_case`` while later lines read the real
    names (``y`` / ``x`` / ``n`` / ``value`` / ``u`` / ``summ``); the range
    bounds reference the undefined ``lowerCAmelCase_``; and the final loop
    calls the undefined ``ucal``. Code left untouched; only documentation
    added.
    """
    __snake_case = int(input("enter the numbers of values: " ) )
    __snake_case = []
    for _ in range(lowerCAmelCase_ ):
        y.append([] )
    # Pre-fill the n x n forward-difference table with zeros.
    for i in range(lowerCAmelCase_ ):
        for j in range(lowerCAmelCase_ ):
            y[i].append(lowerCAmelCase_ )
    __snake_case = 0
    print("enter the values of parameters in a list: " )
    __snake_case = list(map(lowerCAmelCase_ , input().split() ) )
    print("enter the values of corresponding parameters: " )
    for i in range(lowerCAmelCase_ ):
        __snake_case = float(input() )
    __snake_case = int(input("enter the value to interpolate: " ) )
    # u is the normalized offset of the query point from x[0].
    __snake_case = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , lowerCAmelCase_ ):
        for j in range(n - i ):
            __snake_case = y[j + 1][i - 1] - y[j][i - 1]
    __snake_case = y[0][0]
    for i in range(1 , lowerCAmelCase_ ):
        summ += (ucal(lowerCAmelCase_ , lowerCAmelCase_ ) * y[0][i]) / math.factorial(lowerCAmelCase_ )
    print(F'''the value at {value} is {summ}''' )


# NOTE(review): ``main`` is undefined — the driver above kept the mangled name.
if __name__ == "__main__":
    main()
| 163 | from decimal import Decimal, getcontext
from math import ceil, factorial
def SCREAMING_SNAKE_CASE_ ( precision: int ) -> str:
    """Compute pi to ``precision`` significant digits via the Chudnovsky series.

    Each series term contributes roughly 14 correct digits, hence the
    ``ceil(precision / 14)`` iteration count. The trailing (possibly rounded)
    digit is dropped before returning.

    Fixes: the parameter was a placeholder while the body read ``precision``,
    and the precision was bound to a throwaway local instead of being applied
    to the Decimal context (``getcontext().prec``), which the sqrt and
    division below require.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers" )
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers" )
    # All Decimal arithmetic below runs at the requested significance.
    getcontext().prec = precision
    num_iterations = ceil(precision / 1_4 )
    constant_term = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
    exponential_term = 1
    linear_term = 1_3_5_9_1_4_0_9
    partial_sum = Decimal(linear_term )
    for k in range(1, num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 5_4_5_1_4_0_1_3_4
        exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    # Demo: print pi to 50 significant digits.
    # Fix: the constant was bound to ``UpperCAmelCase`` while the f-string read
    # the undefined names ``n`` and ``pi`` (the function above kept its mangled
    # name ``SCREAMING_SNAKE_CASE_``).
    UpperCAmelCase = 5_0
    print(F"The first {UpperCAmelCase} digits of pi is: {SCREAMING_SNAKE_CASE_(UpperCAmelCase)}")
| 666 | 0 |
"""simple docstring"""
def snake_case ( _a: int )-> str:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise TypeError('Input value must be an \'int\' type' )
lowerCamelCase__ = 0
while number:
position += 1
number >>= 1
return position
# Run any doctests embedded in this module when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
def SCREAMING_SNAKE_CASE_ ( n: int = 1_0_0_0 ):
    """Return the index of the first Fibonacci number whose decimal
    representation contains ``n`` digits (Project Euler 25 for n = 1000).

    Fixes: the parameter was a placeholder while the digit-count comparison
    read the undefined name ``n``, and the Fibonacci state and counters were
    bound to throwaway ``snake_case_`` locals while the body read
    ``fa`` / ``f`` / ``i`` / ``index``.
    """
    fa, fa2 = 1, 1  # F(1), F(2)
    index = 2
    while True:
        i = 0
        f = fa + fa2
        fa, fa2 = fa2, f
        index += 1
        # Count the digits of the newly produced Fibonacci number.
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
    # Fix: the original called the undefined name ``solution`` — the function
    # above kept its mangled name ``SCREAMING_SNAKE_CASE_``.
    print(SCREAMING_SNAKE_CASE_(int(str(input()).strip())))
| 666 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __lowerCAmelCase :
    '''Test helper that builds MaskFormerSwin configs, inputs and models for the
    unit tests below (standard transformers "ModelTester" pattern).

    NOTE(review): the ``__init__`` signature repeats the parameter name ``_a``
    for every argument — a duplicate-argument SyntaxError — while the body
    reads the real names (``parent``, ``batch_size``, ...). In
    ``prepare_config_and_inputs_for_common`` the unpacked tuple is bound to a
    single throwaway ``A_`` while the next line reads ``pixel_values``. Code
    left untouched; only documentation added.
    '''

    def __init__( self : Tuple ,_a : str ,_a : Any=13 ,_a : Optional[Any]=32 ,_a : Optional[Any]=2 ,_a : int=3 ,_a : Union[str, Any]=16 ,_a : Optional[int]=[1, 2, 1] ,_a : Optional[int]=[2, 2, 4] ,_a : Dict=2 ,_a : Optional[Any]=2.0 ,_a : Optional[int]=True ,_a : List[str]=0.0 ,_a : List[Any]=0.0 ,_a : Any=0.1 ,_a : int="gelu" ,_a : int=False ,_a : Dict=True ,_a : List[Any]=0.02 ,_a : Dict=1e-5 ,_a : Any=True ,_a : Optional[int]=None ,_a : Optional[Any]=True ,_a : int=10 ,_a : int=8 ,_a : str=["stage1", "stage2", "stage3"] ,_a : Optional[int]=[1, 2, 3] ,):
        '''Store every hyperparameter used to build configs and dummy inputs.'''
        A_ : Optional[int] = parent
        A_ : Optional[int] = batch_size
        A_ : Optional[int] = image_size
        A_ : Tuple = patch_size
        A_ : Optional[Any] = num_channels
        A_ : Any = embed_dim
        A_ : Optional[int] = depths
        A_ : Union[str, Any] = num_heads
        A_ : int = window_size
        A_ : int = mlp_ratio
        A_ : List[str] = qkv_bias
        A_ : Any = hidden_dropout_prob
        A_ : Optional[int] = attention_probs_dropout_prob
        A_ : Optional[int] = drop_path_rate
        A_ : Optional[Any] = hidden_act
        A_ : Union[str, Any] = use_absolute_embeddings
        A_ : Optional[Any] = patch_norm
        A_ : Any = layer_norm_eps
        A_ : Any = initializer_range
        A_ : Union[str, Any] = is_training
        A_ : List[Any] = scope
        A_ : List[Any] = use_labels
        A_ : Dict = type_sequence_label_size
        A_ : List[str] = encoder_stride
        A_ : Union[str, Any] = out_features
        A_ : Any = out_indices
    def _a ( self : Tuple ):
        '''Return (config, pixel_values, labels) with random dummy tensors.'''
        A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        A_ : List[str] = None
        if self.use_labels:
            A_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        A_ : Optional[Any] = self.get_config()
        return config, pixel_values, labels
    def _a ( self : Any ):
        '''Build a MaskFormerSwinConfig from the stored hyperparameters.'''
        return MaskFormerSwinConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
    def _a ( self : Union[str, Any] ,_a : Dict ,_a : Union[str, Any] ,_a : Dict ):
        '''Run the base model in eval mode and check the output sequence shape.'''
        A_ : int = MaskFormerSwinModel(config=A__ )
        model.to(A__ )
        model.eval()
        A_ : Tuple = model(A__ )
        # Sequence shrinks 4x per stage after the first; width doubles per stage.
        A_ : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        A_ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
    def _a ( self : Optional[int] ,_a : Dict ,_a : Union[str, Any] ,_a : Dict ):
        '''Run the backbone and check feature maps, channels, and bad out_features.'''
        A_ : Any = MaskFormerSwinBackbone(config=A__ )
        model.to(A__ )
        model.eval()
        A_ : Union[str, Any] = model(A__ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
        self.parent.assertListEqual(model.channels ,[16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(A__ ):
            A_ : Tuple = ["stem"]
            A_ : Any = MaskFormerSwinBackbone(config=A__ )
    def _a ( self : Dict ):
        '''Return (config, inputs_dict) in the shape the common tests expect.'''
        A_ : List[str] = self.prepare_config_and_inputs()
        A_ : str = config_and_inputs
        A_ : Tuple = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """Common-harness tests for MaskFormerSwin model and backbone.

    NOTE(review): heavily mangled -- the base mixins are the placeholder
    ``_UpperCamelCase``, all seven class attributes share the single name ``a_``
    (each assignment overwrites the previous), every method is named ``_a`` (only
    the last survives on the class), and bodies reference an undefined ``A__``.
    The original attribute/method names must be recovered before this can run.
    """

    # NOTE(review): these seven assignments all bind the same name ``a_``.
    a_ = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    a_ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    a_ = False
    a_ = False
    a_ = False
    a_ = False
    a_ = False

    def _a ( self : int ):
        """Set up the model tester and config tester fixtures."""
        A_ : int = MaskFormerSwinModelTester(self )
        A_ : Tuple = ConfigTester(self ,config_class=A__ ,embed_dim=37 )

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
            """ `nn.DataParallel`"""
        ) )
    def _a ( self : Tuple ):
        """Skipped: incompatible with nn.DataParallel."""
        pass

    def _a ( self : List[str] ):
        """Exercise the standard config round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _a ( self : List[Any] ):
        """Intentionally a no-op (common-properties check not applicable)."""
        return

    def _a ( self : int ):
        """Model forward-shape test via the model tester."""
        A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )

    def _a ( self : Any ):
        """Backbone feature-map test via the model tester."""
        A_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*A__ )

    @unittest.skip("""Swin does not use inputs_embeds""" )
    def _a ( self : Union[str, Any] ):
        """Skipped: no inputs_embeds for Swin."""
        pass

    @unittest.skip("""Swin does not support feedforward chunking""" )
    def _a ( self : Any ):
        """Skipped: no feedforward chunking for Swin."""
        pass

    def _a ( self : List[Any] ):
        """Check input embeddings are an nn.Module and output embeddings are absent/Linear."""
        A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A_ : List[Any] = model_class(A__ )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            A_ : str = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A__ ,nn.Linear ) )

    def _a ( self : Tuple ):
        """Check the first forward() argument is ``pixel_values``."""
        A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A_ : Tuple = model_class(A__ )
            A_ : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A_ : List[Any] = [*signature.parameters.keys()]
            A_ : int = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,A__ )

    @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def _a ( self : str ):
        """Skipped: no attention outputs."""
        pass

    @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
    def _a ( self : Tuple ):
        """Skipped: internal backbone only."""
        pass

    def _a ( self : int ,_a : List[Any] ,_a : Optional[int] ,_a : Optional[int] ,_a : Optional[int] ):
        """Helper: verify hidden-state count and per-stage sequence/embedding shape.

        NOTE(review): four parameters all named ``_a`` -- duplicate argument is a
        SyntaxError; the original names (model_class, config, inputs_dict,
        image_size) were lost in mangling.
        """
        A_ : List[str] = model_class(A__ )
        model.to(A__ )
        model.eval()
        with torch.no_grad():
            A_ : List[str] = model(**self._prepare_for_class(A__ ,A__ ) )
        A_ : str = outputs.hidden_states
        A_ : int = getattr(
            self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(A__ ) ,A__ )
        # Swin has a different seq_length
        A_ : Tuple = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        A_ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)

    def _a ( self : Optional[int] ):
        """Check hidden-states output both via kwargs and via config flag."""
        A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        A_ : Tuple = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            A_ : List[str] = True
            self.check_hidden_states_output(A__ ,A__ ,A__ ,A__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A_ : List[Any] = True
            self.check_hidden_states_output(A__ ,A__ ,A__ ,A__ )

    def _a ( self : List[Any] ):
        """Same as above but with an input padded up to a patch-size multiple."""
        A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        A_ : Optional[int] = 3
        A_ : Optional[Any] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        A_ : str = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        # Round height/width up to the next multiple of the patch size.
        A_ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        A_ : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            A_ : Optional[Any] = True
            self.check_hidden_states_output(A__ ,A__ ,A__ ,(padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A_ : Dict = True
            self.check_hidden_states_output(A__ ,A__ ,A__ ,(padded_height, padded_width) )

    @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
    def _a ( self : Optional[int] ):
        """Skipped: no pretrained checkpoints."""
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def _a ( self : str ):
        """Skipped pending native Swin."""
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def _a ( self : Optional[Any] ):
        """Skipped pending native Swin."""
        pass

    def _a ( self : str ):
        """Check tuple vs dict model outputs are numerically equivalent.

        NOTE(review): all inner helpers also lost their local names to mangling;
        nesting below follows the upstream common-test layout -- confirm.
        """
        A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(_a : str ):
            # NOTE(review): assigns to a placeholder but returns ``t`` (undefined as written).
            A_ : List[Any] = 0
            return t

        def check_equivalence(_a : List[Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple={} ):
            with torch.no_grad():
                A_ : List[Any] = model(**A__ ,return_dict=A__ ,**A__ )
                A_ : Tuple = model(**A__ ,return_dict=A__ ,**A__ ).to_tuple()

                def recursive_check(_a : Union[str, Any] ,_a : Tuple ):
                    if isinstance(A__ ,(List, Tuple) ):
                        for tuple_iterable_value, dict_iterable_value in zip(A__ ,A__ ):
                            recursive_check(A__ ,A__ )
                    elif isinstance(A__ ,A__ ):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() ,dict_object.values() ):
                            recursive_check(A__ ,A__ )
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(A__ ) ,set_nan_tensor_to_zero(A__ ) ,atol=1e-5 ) ,msg=(
                                """Tuple and dict output are not equal. Difference:"""
                                f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
                                f' {torch.isnan(A__ ).any()} and `inf`: {torch.isinf(A__ )}. Dict has'
                                f' `nan`: {torch.isnan(A__ ).any()} and `inf`: {torch.isinf(A__ )}.'
                            ) ,)

                recursive_check(A__ ,A__ )

        for model_class in self.all_model_classes:
            A_ : Union[str, Any] = model_class(A__ )
            model.to(A__ )
            model.eval()
            A_ : Optional[int] = self._prepare_for_class(A__ ,A__ )
            A_ : Optional[int] = self._prepare_for_class(A__ ,A__ )
            check_equivalence(A__ ,A__ ,A__ )
            A_ : List[Any] = self._prepare_for_class(A__ ,A__ ,return_labels=A__ )
            A_ : Optional[int] = self._prepare_for_class(A__ ,A__ ,return_labels=A__ )
            check_equivalence(A__ ,A__ ,A__ )
            A_ : str = self._prepare_for_class(A__ ,A__ )
            A_ : Tuple = self._prepare_for_class(A__ ,A__ )
            check_equivalence(A__ ,A__ ,A__ ,{"""output_hidden_states""": True} )
            A_ : Union[str, Any] = self._prepare_for_class(A__ ,A__ ,return_labels=A__ )
            A_ : Tuple = self._prepare_for_class(A__ ,A__ ,return_labels=A__ )
            check_equivalence(A__ ,A__ ,A__ ,{"""output_hidden_states""": True} )
@require_torch
class __lowerCAmelCase ( unittest.TestCase , _UpperCamelCase ):
    """Backbone-API tests for MaskFormerSwinBackbone.

    NOTE(review): mangled like the class above -- both class attributes share the
    name ``a_``, both methods are named ``_a``, and bodies reference an undefined
    ``A__``; locals such as ``backbone``/``outputs``/``batch_size`` are read but
    never bound under those names.
    """

    a_ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    a_ = MaskFormerSwinConfig

    def _a ( self : Optional[int] ):
        """Set up the shared model tester."""
        A_ : Dict = MaskFormerSwinModelTester(self )

    def _a ( self : Any ):
        """Exercise default outputs, hidden states, and attentions of the backbone."""
        A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        A_ : Optional[int] = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            A_ : Tuple = backbone_class(A__ )
            backbone.to(A__ )
            backbone.eval()
            A_ : List[Any] = backbone(**A__ )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps ,A__ )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps ,backbone.channels ):
                self.assertTrue(feature_map.shape[:2] ,(batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            A_ : Union[str, Any] = backbone(**A__ ,output_hidden_states=A__ )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) ,len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] ,backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    A_ : Any = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) ,(batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                A_ : str = backbone(**A__ ,output_attentions=A__ )
                self.assertIsNotNone(outputs.attentions )
| 665 | from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( nums: list[int | float] , left: int , right: int ):
    """Return the maximum of ``nums[left:right + 1]`` by divide and conquer.

    ``left`` and ``right`` are inclusive bounds and may be negative
    (Python-style indices in ``[-len(nums), len(nums) - 1]``).

    Raises:
        ValueError: if ``nums`` is empty.
        IndexError: if ``left`` or ``right`` is out of range.

    >>> SCREAMING_SNAKE_CASE_([3, 2, 1], 0, 2)
    3
    >>> SCREAMING_SNAKE_CASE_([-3, -2, -1], 0, 2)
    -1
    >>> SCREAMING_SNAKE_CASE_([2], 0, 0)
    2

    NOTE(review): the original declared all three parameters under one name
    (duplicate argument -- SyntaxError) and recursed via a nonexistent
    ``find_max``; both fixed here, names taken from the body's own references.
    """
    if len(nums ) == 0:
        raise ValueError("find_max() arg is an empty sequence" )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError("list index out of range" )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = SCREAMING_SNAKE_CASE_(nums , left , mid )  # find max in range[left, mid]
    right_max = SCREAMING_SNAKE_CASE_(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
# Run the module's doctests verbosely when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True)
| 666 | 0 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__A =logging.get_logger(__name__)
def _UpperCamelCase ( ):
# Get the sagemaker specific mp parameters from smp_options variable.
UpperCAmelCase__ : Any = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
UpperCAmelCase__ : Optional[Any] = json.loads(lowerCAmelCase_ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
UpperCAmelCase__ : List[str] = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
UpperCAmelCase__ : List[str] = json.loads(lowerCAmelCase_ )
if not mpi_options.get("""sagemaker_mpi_enabled""" , lowerCAmelCase_ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
# Initialize the SageMaker model-parallel runtime once at import time.
# NOTE(review): ``is_sagemaker_model_parallel_available`` is not defined in this
# file as written -- the guard function above was mangled to ``_UpperCamelCase`` --
# so this check raises NameError at import time; confirm the intended name.
if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()
@dataclass
class _snake_case ( TrainingArguments ):
    """Deprecated drop-in for ``TrainingArguments`` used with the SageMaker Trainer.

    NOTE(review): reconstructed from a mangled source. As written, the base class
    was an undefined placeholder (subclassing a function), ``warnings.warn`` was
    passed an undefined name instead of a warning category, the
    device/``local_rank``/GPU-count assignments were all collapsed onto a single
    local name, and dataset junk was fused onto the final ``return`` line
    (SyntaxError). Restored per the upstream ``SageMakerTrainingArguments``
    implementation -- confirm against the project copy. Also note every method
    below still carries the mangled name ``snake_case__`` (duplicates shadow each
    other); the original names must be recovered separately.
    """

    # Raw string forwarded by the SageMaker launcher; ignored by SageMakerTrainer.
    lowerCAmelCase :str = field(
        default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )

    def snake_case__ ( self):
        """Post-init hook: run the base post-init, then warn about deprecation."""
        super().__post_init__()
        warnings.warn(
            """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
            """`TrainingArguments` instead.""" , FutureWarning , )

    @cached_property
    def snake_case__ ( self):
        """Select the torch device for this run, setting GPU count and local rank."""
        logger.info("""PyTorch: setting up devices""")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                """torch.distributed process group is initialized, but local_rank == -1. """
                """In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""")
        if self.no_cuda:
            device = torch.device("""cpu""")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            # Model parallelism: one process per partition, pinned to its SMP local rank.
            local_rank = smp.local_rank()
            device = torch.device("""cuda""" , local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK"""))
            device = torch.device("""cuda""" , self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta)
            device = torch.device("""cuda""" , self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device

    @property
    def snake_case__ ( self):
        """World size: SMP data-parallel size when available, else the base value."""
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def snake_case__ ( self):
        """Whether the Trainer should move the model to the device (not under SMP)."""
        return not is_sagemaker_model_parallel_available()

    @property
    def snake_case__ ( self):
        """Gradient-accumulation no-sync optimization is disabled on SageMaker."""
        return False
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCAmelCase = logging.get_logger(__name__)
# NOTE(review): mangling collapsed four distinct constants (logger,
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) onto the single name ``UpperCAmelCase``;
# each assignment below overwrites the previous one, and the class further down
# references the original constant names, which no longer exist.
# Canonical asset file names for the slow/fast tokenizer.
UpperCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# Download URLs for each pretrained RoBERTa checkpoint's tokenizer files.
UpperCAmelCase = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}
# Maximum input lengths (positional-embedding size) per checkpoint.
UpperCAmelCase = {
    "roberta-base": 5_1_2,
    "roberta-large": 5_1_2,
    "roberta-large-mnli": 5_1_2,
    "distilroberta-base": 5_1_2,
    "roberta-base-openai-detector": 5_1_2,
    "roberta-large-openai-detector": 5_1_2,
}
class snake_case__ ( _UpperCamelCase ):
    """Fast (tokenizers-backed) RoBERTa tokenizer.

    NOTE(review): heavily mangled. The five class attributes all share the name
    ``_SCREAMING_SNAKE_CASE`` (each overwrites the previous), the ``mask_token``
    property was renamed to ``UpperCAmelCase__`` so ``@mask_token.setter`` below
    references a name that does not exist at class-body time (NameError), and
    method bodies bind placeholders while reading the original local names
    (``pre_tok_state``, ``state``, ``output``, ``cls``/``sep``, ...).
    """

    _SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
    _SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
    _SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _SCREAMING_SNAKE_CASE : int = ["input_ids", "attention_mask"]
    _SCREAMING_SNAKE_CASE : List[str] = RobertaTokenizer

    def __init__( self : Optional[int] , A__ : List[Any]=None , A__ : Optional[int]=None , A__ : List[str]=None , A__ : Dict="replace" , A__ : List[str]="<s>" , A__ : Optional[Any]="</s>" , A__ : List[str]="</s>" , A__ : List[Any]="<s>" , A__ : int="<unk>" , A__ : int="<pad>" , A__ : List[Any]="<mask>" , A__ : Any=False , A__ : Optional[int]=True , **A__ : Union[str, Any] , ) -> int:
        """Build the fast tokenizer and sync ``add_prefix_space``/``trim_offsets``
        into the backend pre-tokenizer and post-processor state."""
        super().__init__(
            A__ , A__ , tokenizer_file=A__ , errors=A__ , bos_token=A__ , eos_token=A__ , sep_token=A__ , cls_token=A__ , unk_token=A__ , pad_token=A__ , mask_token=A__ , add_prefix_space=A__ , trim_offsets=A__ , **A__ , )
        snake_case_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , A__ ) != add_prefix_space:
            # Rebuild the pre-tokenizer with the requested add_prefix_space.
            snake_case_ : List[Any] = getattr(A__ , pre_tok_state.pop("type" ) )
            snake_case_ : Any = add_prefix_space
            snake_case_ : List[Any] = pre_tok_class(**A__ )
        snake_case_ : Optional[int] = add_prefix_space
        snake_case_ : List[str] = "post_processor"
        snake_case_ : Tuple = getattr(self.backend_tokenizer , A__ , A__ )
        if tokenizer_component_instance:
            snake_case_ : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                snake_case_ : str = tuple(state["sep"] )
            if "cls" in state:
                snake_case_ : Tuple = tuple(state["cls"] )
            snake_case_ : Tuple = False
            if state.get("add_prefix_space" , A__ ) != add_prefix_space:
                snake_case_ : Optional[Any] = add_prefix_space
                snake_case_ : str = True
            if state.get("trim_offsets" , A__ ) != trim_offsets:
                snake_case_ : Optional[int] = trim_offsets
                snake_case_ : List[Any] = True
            if changes_to_apply:
                # Rebuild and re-attach the post-processor with the updated state.
                snake_case_ : int = getattr(A__ , state.pop("type" ) )
                snake_case_ : List[Any] = component_class(**A__ )
                setattr(self.backend_tokenizer , A__ , A__ )

    @property
    def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
        """Return the mask token as a string, or None (with an error log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def UpperCAmelCase__ ( self : Tuple , A__ : Dict ) -> Union[str, Any]:
        """Set the mask token, wrapping plain strings in a lstrip AddedToken."""
        snake_case_ : Any = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else value
        snake_case_ : Any = value

    def UpperCAmelCase__ ( self : int , *A__ : Optional[Any] , **A__ : int ) -> BatchEncoding:
        """Batch-encode; pretokenized inputs require add_prefix_space=True.

        NOTE(review): ``assert`` for input validation is stripped under ``-O``;
        upstream later replaced this with an explicit raise.
        """
        snake_case_ : Optional[Any] = kwargs.get("is_split_into_words" , A__ )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*A__ , **A__ )

    def UpperCAmelCase__ ( self : Union[str, Any] , *A__ : Any , **A__ : List[Any] ) -> BatchEncoding:
        """Encode a single example; same pretokenized-input constraint as above."""
        snake_case_ : Optional[int] = kwargs.get("is_split_into_words" , A__ )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*A__ , **A__ )

    def UpperCAmelCase__ ( self : Tuple , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
        """Save the backend vocabulary files and return their paths."""
        snake_case_ : Optional[Any] = self._tokenizer.model.save(A__ , name=A__ )
        return tuple(A__ )

    def UpperCAmelCase__ ( self : int , A__ : List[str] , A__ : Union[str, Any]=None ) -> Any:
        """Build model inputs as ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        snake_case_ : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def UpperCAmelCase__ ( self : Dict , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
        """Return all-zero token-type ids (RoBERTa does not use token types)."""
        snake_case_ : str = [self.sep_token_id]
        snake_case_ : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 666 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class lowerCAmelCase__ :
    """Builds tiny DPR configs and inputs for the TF tests below.

    NOTE(review): mangled -- ``__init__`` and the ``create_and_check_*`` methods
    declare the same parameter name ``SCREAMING_SNAKE_CASE__`` repeatedly
    (duplicate argument -- SyntaxError), attribute assignments lost their
    ``self.`` prefix (all bind the local ``__lowerCamelCase``), and the final
    method lost its tuple unpacking while still reading ``input_ids``.
    """

    def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int]=13 , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : List[Any]=99 , SCREAMING_SNAKE_CASE__ : Dict=32 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : int=5_12 , SCREAMING_SNAKE_CASE__ : int=16 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=0 , ) -> Union[str, Any]:
        # NOTE(review): each line below should be ``self.<name> = <name>``.
        __lowerCamelCase = parent
        __lowerCamelCase = batch_size
        __lowerCamelCase = seq_length
        __lowerCamelCase = is_training
        __lowerCamelCase = use_input_mask
        __lowerCamelCase = use_token_type_ids
        __lowerCamelCase = use_labels
        __lowerCamelCase = vocab_size
        __lowerCamelCase = hidden_size
        __lowerCamelCase = num_hidden_layers
        __lowerCamelCase = num_attention_heads
        __lowerCamelCase = intermediate_size
        __lowerCamelCase = hidden_act
        __lowerCamelCase = hidden_dropout_prob
        __lowerCamelCase = attention_probs_dropout_prob
        __lowerCamelCase = max_position_embeddings
        __lowerCamelCase = type_vocab_size
        __lowerCamelCase = type_sequence_label_size
        __lowerCamelCase = initializer_range
        __lowerCamelCase = num_labels
        __lowerCamelCase = num_choices
        __lowerCamelCase = scope
        __lowerCamelCase = projection_dim

    def __A ( self : Optional[int] ) -> List[str]:
        """Build a tiny DPRConfig plus random ids, masks, and label tensors."""
        __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowerCamelCase = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
        __lowerCamelCase = None
        if self.use_token_type_ids:
            __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __lowerCamelCase = None
        __lowerCamelCase = None
        __lowerCamelCase = None
        if self.use_labels:
            __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
        __lowerCamelCase = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A__ , initializer_range=self.initializer_range , )
        __lowerCamelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]:
        """Check TFDPRContextEncoder pooled-output shape for three input variants."""
        __lowerCamelCase = TFDPRContextEncoder(config=A__ )
        __lowerCamelCase = model(A__ , attention_mask=A__ , token_type_ids=A__ )
        __lowerCamelCase = model(A__ , token_type_ids=A__ )
        __lowerCamelCase = model(A__ )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )

    def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict:
        """Check TFDPRQuestionEncoder pooled-output shape for three input variants."""
        __lowerCamelCase = TFDPRQuestionEncoder(config=A__ )
        __lowerCamelCase = model(A__ , attention_mask=A__ , token_type_ids=A__ )
        __lowerCamelCase = model(A__ , token_type_ids=A__ )
        __lowerCamelCase = model(A__ )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )

    def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
        """Check TFDPRReader start/end/relevance logit shapes."""
        __lowerCamelCase = TFDPRReader(config=A__ )
        __lowerCamelCase = model(A__ , attention_mask=A__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )

    def __A ( self : int ) -> Optional[int]:
        """Return ``(config, inputs_dict)`` for the common harness.

        NOTE(review): the 7-way tuple unpacking was lost in mangling, so
        ``input_ids`` below is undefined as written.
        """
        __lowerCamelCase = self.prepare_config_and_inputs()
        (
            __lowerCamelCase
        ) = config_and_inputs
        __lowerCamelCase = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """Common-harness tests for the three TF DPR models.

    NOTE(review): mangled -- the mixin bases are placeholders, all seven class
    attributes share the name ``a__``, all test methods are named ``__A`` (only
    the last survives), and bodies reference an undefined ``A__``. The tester
    class above is also no longer named ``TFDPRModelTester``.
    """

    a__ : List[Any] = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    a__ : Optional[Any] = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
    a__ : str = False
    a__ : Tuple = False
    a__ : Optional[Any] = False
    a__ : Union[str, Any] = False
    a__ : Optional[Any] = False

    def __A ( self : Optional[int] ) -> List[str]:
        """Set up model tester and config tester fixtures."""
        __lowerCamelCase = TFDPRModelTester(self )
        __lowerCamelCase = ConfigTester(self , config_class=A__ , hidden_size=37 )

    def __A ( self : Tuple ) -> List[str]:
        """Run the standard config checks."""
        self.config_tester.run_common_tests()

    def __A ( self : Any ) -> Any:
        """Context-encoder shape test."""
        __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*A__ )

    def __A ( self : List[Any] ) -> List[Any]:
        """Question-encoder shape test."""
        __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*A__ )

    def __A ( self : Dict ) -> Tuple:
        """Reader shape test."""
        __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*A__ )

    @slow
    def __A ( self : str ) -> Dict:
        """Smoke-test from_pretrained for each DPR architecture."""
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase = TFDPRContextEncoder.from_pretrained(A__ )
            self.assertIsNotNone(A__ )
        # NOTE(review): this second context-encoder loop duplicates the first.
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase = TFDPRContextEncoder.from_pretrained(A__ )
            self.assertIsNotNone(A__ )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase = TFDPRQuestionEncoder.from_pretrained(A__ )
            self.assertIsNotNone(A__ )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase = TFDPRReader.from_pretrained(A__ )
            self.assertIsNotNone(A__ )
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow integration test comparing a real DPR question-encoder embedding slice.

    NOTE(review): mangled -- assignments bind ``__lowerCamelCase`` while the code
    reads ``model``, ``output`` and ``expected_slice``; undefined as written.
    """

    @slow
    def __A ( self : Optional[Any] ) -> List[str]:
        """Load the NQ question encoder and compare the first 10 embedding values."""
        __lowerCamelCase = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
        __lowerCamelCase = tf.constant(
            [[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP]
        __lowerCamelCase = model(A__ )[0] # embedding shape = (1, 768)
        # compare the actual values for a slice.
        __lowerCamelCase = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 298 | from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
    """Slow integration test for TFXLMRobertaModel output values.

    NOTE(review): mangled -- locals are bound to ``snake_case_`` while the code
    reads ``model``, ``output`` and ``expected_slice``, and ``A__`` is undefined.
    """

    @slow
    def UpperCAmelCase__ ( self : int ) -> Optional[Any]:
        """Check last-hidden-state shape and a 3x3 value slice against references."""
        snake_case_ : Dict = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
        snake_case_ : Any = {
            "input_ids": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
        }
        snake_case_ : List[str] = model(A__ )["last_hidden_state"]
        snake_case_ : str = tf.TensorShape((1, 6, 7_68) )
        self.assertEqual(output.shape , A__ )
        # compare the actual values for a slice.
        snake_case_ : List[str] = tf.convert_to_tensor(
            [
                [
                    [0.068_1762, 0.1089_4451, 0.0677_2504],
                    [-0.0642_3668, 0.0236_6615, 0.0432_9344],
                    [-0.0605_7295, 0.0997_4135, -0.0007_0584],
                ]
            ] , dtype=tf.floataa , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 666 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCAmelCase ( unittest.TestCase ):
    """Fast tests for AudioDiffusionPipeline built from tiny dummy models.

    NOTE(review): in the corrupted original every method was named
    `lowerCAmelCase` (each definition shadowed the previous one) while the
    test body referenced `self.dummy_unet`, `self.dummy_vqvae_and_unet` and
    `self.dummy_unet_condition`; the methods are restored to those names and
    the undefined `A__` placeholders replaced by the values the surrounding
    code demonstrably expects.
    """

    def tearDown(self):
        # Release memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        """Tiny unconditional UNet producing (32, 64) spectrogram images."""
        torch.manual_seed(0)
        model = UNetaDModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        """Tiny conditional UNet (cross-attention dim 10) for encoding tests."""
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        """Tiny (vqvae, unet) pair for latent audio diffusion."""
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNetaDModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
    """GPU integration test for a pretrained audio-diffusion checkpoint.

    NOTE(review): both methods were mangled to the same name (shadowing) and
    the body referenced an undefined `A__` for device/disable arguments;
    restored to the standard tearDown/test pattern.
    """

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


# NOTE(review): this import was fused into a corrupted trailer of the line
# above in the original file; it is required by the annotations of the
# image-processor class defined further down this module.
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__ ( _UpperCamelCase ):
    r"""
    CLIP-style image processor: optional RGB conversion, shortest-edge resize,
    center crop, rescale and normalization, returning a `BatchFeature` with
    `pixel_values`.

    NOTE(review): every signature in the corrupted original declared duplicate
    parameter names `A__` (a SyntaxError) and all transform methods shared one
    mangled name, while `preprocess` calls `self.resize`, `self.center_crop`,
    `self.rescale` and `self.normalize`; the methods are restored to those
    names and the parameters to the names the bodies reference.
    """

    # NOTE(review): was a mangled attribute; `model_input_names` is the
    # attribute the image-processor base class conventionally declares —
    # confirm against the (unresolved here) `_UpperCamelCase` base.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # `size` is a shortest-edge spec, `crop_size` an explicit box.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full pipeline on one or more images; per-call arguments
        override the defaults stored on the instance."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 666 | 0 |
import math
def sieve(n: int) -> list:
    """Return all primes <= n using a segmented sieve of Eratosthenes.

    NOTE(review): the function was mangled to `__SCREAMING_SNAKE_CASE` while
    the demo line below calls `sieve` (a NameError); the name is restored and
    the demo call is guarded so importing the module stays cheap.
    """
    primes = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    is_candidate = [True] * (end + 1)
    small_primes = []  # primes <= sqrt(n); enough to sieve every segment
    while start <= end:
        if is_candidate[start]:
            small_primes.append(start)
            for multiple in range(start * start, end + 1, start):
                is_candidate[multiple] = False
        start += 1
    primes += small_primes
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        segment = [True] * (high - low + 1)
        for p in small_primes:
            # First multiple of p inside [low, high].
            first = math.floor(low / p) * p
            if first < low:
                first += p
            for multiple in range(first, high + 1, p):
                segment[multiple - low] = False
        for offset in range(len(segment)):
            if segment[offset]:
                primes.append(offset + low)
        low = high + 1
        high = min(high + end, n)
    return primes


if __name__ == "__main__":
    print(sieve(10**6))
| 17 | from __future__ import annotations
def get_valid_pos(position, n):
    """Return the knight moves from `position` that stay on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for candidate in positions:
        y_test, x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate)
    return permissible_positions


def is_complete(board):
    """True when every square has been visited (no zeros left on the board)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board, pos, curr):
    """Extend a partial tour from `pos` (move number `curr`) by backtracking.

    NOTE(review): the mangled original declared three parameters with the
    same name (a SyntaxError) and lost the `board[y][x]` assignment target.
    """
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack
    return False


def open_knight_tour(n):
    """Find an open knight's tour on an n x n board.

    Returns the board with squares numbered 1..n*n in visiting order, or
    raises ValueError when no tour exists for this size.
    """
    board = [[0 for _ in range(n)] for _ in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    # Fixed "Kight" -> "Knight" typo in the user-facing message.
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 666 | 0 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
a : str = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Convert an official Alexa Bort (Gluon/MXNet) checkpoint into a
    HuggingFace PyTorch BERT model saved under `pytorch_dump_folder_path`,
    then verify both models produce matching outputs on a sample sentence.

    NOTE(review): the mangled original declared two parameters with the same
    name (a SyntaxError) and was not callable under the name the __main__
    block uses; both are fixed, and lost assignment targets were restored
    from the parameter-mapping table documented inline below.
    """
    # Hyper-parameters of the released bort_4_8_768_1024 checkpoint.
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }
    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"
    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Gluon -> Transformers parameter mapping (per encoder layer *):
    #   word_embed.0.weight                          -> bert.embeddings.word_embeddings.weight
    #   encoder.position_weight                      -> bert.embeddings.position_embeddings.weight
    #   encoder.layer_norm.{beta,gamma}              -> bert.embeddings.LayerNorm.{bias,weight}
    #   ...attention_cell.proj_{key,query,value}.*   -> ...attention.self.{key,query,value}.*
    #   ...proj.*                                    -> ...attention.output.dense.*
    #   ...layer_norm.{beta,gamma}                   -> ...attention.output.LayerNorm.{bias,weight}
    #   ...ffn.ffn_1.*                               -> ...intermediate.dense.*
    #   ...ffn.ffn_2.*                               -> ...output.dense.*
    #   ...ffn.layer_norm.{beta,gamma}               -> ...output.LayerNorm.{bias,weight}

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map the gluon parameter back onto the HF param.
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models. Defined locally because the module-level
    # sample-text constant was clobbered by later assignments in this file.
    sample_text = "The Nymphenburg Palace is a beautiful palace in Munich!"
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(sample_text)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(sample_text, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    # NOTE(review): the mangled original bound the parser and parsed args to a
    # throwaway name while referencing `parser`/`args` (NameError); restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 633 | from ...configuration_utils import PretrainedConfig
class snake_case__ ( _UpperCamelCase ):
    """Configuration for BERT-generation models.

    NOTE(review): the original __init__ declared every parameter with the same
    mangled name (a SyntaxError) and assigned values to a throwaway local
    instead of instance attributes; both are fixed, restoring the standard
    config contract of storing each hyper-parameter on `self`.
    """

    # Identifier consumed by the Auto* machinery — presumably the `model_type`
    # attribute of the (unresolved here) `_UpperCamelCase` base class; confirm.
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 666 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    """DatasetInfosDict.from_directory picks up dataset_size from either the
    README.md YAML header or the legacy dataset_infos.json.

    NOTE(review): renamed from a colliding mangled name and given distinct
    parameter names (the original declared two parameters with one name).
    """
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    """A DatasetInfo round-trips through write_to_directory/from_directory."""
    dataset_info_dir = str(tmp_path)
    dataset_info.write_to_directory(dataset_info_dir)
    reloaded = DatasetInfo.from_directory(dataset_info_dir)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(dataset_info_dir, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    """_to_yaml_dict keeps exactly the whitelisted fields and survives a
    safe_dump/safe_load YAML round trip."""
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    """An empty DatasetInfo serializes to an empty YAML dict."""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    """A DatasetInfosDict round-trips through the README.md YAML form.

    NOTE(review): the lost assignment targets in the normalization loop were
    restored — each entry's config_name is set and the entry replaced by its
    YAML projection before comparing with the reloaded dict.
    """
    dataset_infos_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(dataset_infos_dir)
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir, "README.md"))
import math
def sieve(n: int) -> list:
    """Return all primes <= n using a segmented Sieve of Eratosthenes.

    Phase 1 runs a classic sieve on [2, sqrt(n)] to collect the base primes;
    phase 2 sweeps windows of width sqrt(n) across (sqrt(n), n], crossing off
    multiples of each base prime inside the window.

    NOTE(review): every local in the original was rebound to one mangled name
    (`snake_case_`), leaving `start`, `end`, `temp`, `low`, `high`, … undefined;
    the function name is restored from the call site `sieve(1_0**6)` below.

    Args:
        n: upper bound (inclusive). Values < 2 yield an empty list.

    Returns:
        list of primes in ascending order.
    """
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    prime = []

    # Phase 1: simple sieve over [2, end] collects the base primes.
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    # Phase 2: sieve each window [low, high] with the base primes.
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # first multiple of `each` at or after `low`
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
# NOTE(review): runs at import time and prints every prime below 10**6; consider
# guarding with `if __name__ == "__main__":` so importing the module stays side-effect free.
print(sieve(1_0**6))
# Docs-notebook configuration (Italian translation build).
# NOTE(review): names restored — `INSTALL_CONTENT` is read two lines below but was
# bound to a mangled name; `notebook_first_cells` / `black_avoid_patterns` are the
# conventional names in transformers' docs `_config.py` files — confirm.
INSTALL_CONTENT = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
# First cell injected into every generated notebook: the install command above.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholder substitutions the doc-style checker must leave untouched.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Holds image-processor settings and computes the expected output sizes.

    NOTE(review): the original was machine-mangled — two signatures duplicated a
    single parameter name (a SyntaxError) and all three methods shared one name.
    Class and method names are restored from the sibling test class's call sites
    (`ConditionalDetrImageProcessingTester(self)`, `prepare_image_processor_dict()`,
    `self.get_expected_values([image])`); attribute names from the `self.*` reads.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        image_mean=[0.5, 0.5, 0.5],  # kept as in the original; read-only here
        image_std=[0.5, 0.5, 0.5],
        do_normalize=True,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # DETR-style sizing: shortest edge resized to 18, capped at 1333.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should emit for `image_inputs`.

        Unbatched: aspect-preserving resize of the first image so its shorter side
        equals size["shortest_edge"].  Batched: per-image values, then the max
        height / max width across the batch (the padded shape).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size  # PIL reports (width, height)
            else:
                h, w = image.shape[1], image.shape[2]  # arrays are (C, H, W)
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( _UpperCamelCase , unittest.TestCase ):
    # Test suite for ConditionalDetrImageProcessor: attribute presence, from_dict
    # overrides, PIL/numpy/torch input handling, and slow COCO detection/panoptic
    # annotation encoding against reference tensors.
    # NOTE(review): identifiers are machine-mangled — every test method is named
    # `UpperCAmelCase__` (later defs shadow earlier ones at class-creation time),
    # and several locals are bound to `snake_case_` but read back under real names
    # (`image_processor`, `encoding`). Confirm against the upstream transformers
    # test file before relying on names or on all tests actually being collected.
    _SCREAMING_SNAKE_CASE : Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None

    # setUp: build the shared settings helper.
    def UpperCAmelCase__ ( self : Tuple ) -> Dict:
        '''simple docstring'''
        snake_case_ : List[str] = ConditionalDetrImageProcessingTester(self )

    # Expose the tester's kwargs dict as a property.
    @property
    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    # The processor must expose all configuration attributes.
    def UpperCAmelCase__ ( self : Any ) -> Tuple:
        '''simple docstring'''
        snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A__ , "image_mean" ) )
        self.assertTrue(hasattr(A__ , "image_std" ) )
        self.assertTrue(hasattr(A__ , "do_normalize" ) )
        self.assertTrue(hasattr(A__ , "do_resize" ) )
        self.assertTrue(hasattr(A__ , "size" ) )

    # from_dict: defaults, then explicit size/max_size/pad overrides.
    def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
        '''simple docstring'''
        snake_case_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
        self.assertEqual(image_processor.do_pad , A__ )
        snake_case_ : Optional[int] = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A__ )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , A__ )

    # Intentionally empty (placeholder kept from the common test mixin).
    def UpperCAmelCase__ ( self : str ) -> Optional[int]:
        '''simple docstring'''
        pass

    # PIL input: single image and batch produce the expected (padded) shapes.
    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        '''simple docstring'''
        snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , Image.Image )
        # Test not batched input
        snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        snake_case_ : int = image_processing(A__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    # numpy input: same shape expectations as the PIL path.
    def UpperCAmelCase__ ( self : int ) -> Any:
        '''simple docstring'''
        snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , np.ndarray )
        # Test not batched input
        snake_case_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : List[str] = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ : Optional[int] = image_processing(A__ , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : Dict = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    # torch input: same shape expectations as the PIL path.
    def UpperCAmelCase__ ( self : Tuple ) -> str:
        '''simple docstring'''
        snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , torch.Tensor )
        # Test not batched input
        snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ : Any = image_processing(A__ , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : int = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    # Slow integration test: COCO detection annotations against golden tensors.
    @slow
    def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            snake_case_ : Optional[Any] = json.loads(f.read() )
        snake_case_ : int = {"image_id": 3_97_69, "annotations": target}
        # encode them
        snake_case_ : Optional[int] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
        snake_case_ : Any = image_processing(images=A__ , annotations=A__ , return_tensors="pt" )
        # verify pixel values
        snake_case_ : List[Any] = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , A__ )
        snake_case_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
        # verify area
        snake_case_ : Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
        # verify boxes
        snake_case_ : Any = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
        snake_case_ : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
        # verify image_id
        snake_case_ : List[Any] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
        # verify is_crowd
        snake_case_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
        # verify class_labels
        snake_case_ : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
        # verify orig_size
        snake_case_ : Any = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
        # verify size
        snake_case_ : List[str] = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )

    # Slow integration test: COCO panoptic annotations (adds segmentation masks).
    @slow
    def UpperCAmelCase__ ( self : int ) -> str:
        '''simple docstring'''
        snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            snake_case_ : Any = json.loads(f.read() )
        snake_case_ : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
        snake_case_ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        snake_case_ : Union[str, Any] = ConditionalDetrImageProcessor(format="coco_panoptic" )
        snake_case_ : str = image_processing(images=A__ , annotations=A__ , masks_path=A__ , return_tensors="pt" )
        # verify pixel values
        snake_case_ : int = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , A__ )
        snake_case_ : str = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
        # verify area
        snake_case_ : Optional[int] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
        # verify boxes
        snake_case_ : str = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
        snake_case_ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
        # verify image_id
        snake_case_ : List[str] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
        # verify is_crowd
        snake_case_ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
        # verify class_labels
        snake_case_ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
        # verify masks
        snake_case_ : Union[str, Any] = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A__ )
        # verify orig_size
        snake_case_ : Dict = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
        # verify size
        snake_case_ : str = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowerCamelCase = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class A ( unittest.TestCase ):
    # Test suite for the zero-shot-classification pipeline: input validation,
    # entailment-id resolution from model label maps, tiny-model smoke tests for
    # both frameworks, and slow golden-score checks against roberta-large-mnli.
    # NOTE(review): identifiers are machine-mangled (`A`, `UpperCamelCase__`,
    # `_lowerCamelCase`); `model_mapping` / `tf_model_mapping` read in the class
    # body are presumably the two mapping attributes bound just above under a
    # mangled name — confirm against the upstream transformers test file.
    UpperCamelCase__ : str =MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    UpperCamelCase__ : Optional[Any] =TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        UpperCamelCase__ : Any ={config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        UpperCamelCase__ : Optional[Any] ={
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    # Build a small pipeline plus sample inputs for the common pipeline tests.
    def lowerCamelCase ( self : List[str] , lowercase_ : int , lowercase_ : Dict , lowercase_ : str ) -> Any:
        """simple docstring"""
        _lowerCamelCase : Optional[int] =ZeroShotClassificationPipeline(
            model=A__ , tokenizer=A__ , candidate_labels=['polics', 'health'] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    # Exercise every accepted call shape and every invalid-input error path.
    def lowerCamelCase ( self : str , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> str:
        """simple docstring"""
        _lowerCamelCase : Optional[Any] =classifier('Who are you voting for in 2020?' , candidate_labels='politics' )
        self.assertEqual(A__ , {'sequence': ANY(A__ ), 'labels': [ANY(A__ )], 'scores': [ANY(A__ )]} )
        # No kwarg
        _lowerCamelCase : Optional[Any] =classifier('Who are you voting for in 2020?' , ['politics'] )
        self.assertEqual(A__ , {'sequence': ANY(A__ ), 'labels': [ANY(A__ )], 'scores': [ANY(A__ )]} )
        _lowerCamelCase : Optional[int] =classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] )
        self.assertEqual(A__ , {'sequence': ANY(A__ ), 'labels': [ANY(A__ )], 'scores': [ANY(A__ )]} )
        _lowerCamelCase : int =classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' )
        self.assertEqual(
            A__ , {'sequence': ANY(A__ ), 'labels': [ANY(A__ ), ANY(A__ )], 'scores': [ANY(A__ ), ANY(A__ )]} )
        # scores across labels must form a probability distribution
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
        _lowerCamelCase : Dict =classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] )
        self.assertEqual(
            A__ , {'sequence': ANY(A__ ), 'labels': [ANY(A__ ), ANY(A__ )], 'scores': [ANY(A__ ), ANY(A__ )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
        _lowerCamelCase : Union[str, Any] =classifier(
            'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' )
        self.assertEqual(A__ , {'sequence': ANY(A__ ), 'labels': [ANY(A__ )], 'scores': [ANY(A__ )]} )
        # https://github.com/huggingface/transformers/issues/13846
        _lowerCamelCase : Tuple =classifier(['I am happy'] , ['positive', 'negative'] )
        self.assertEqual(
            A__ , [
                {'sequence': ANY(A__ ), 'labels': [ANY(A__ ), ANY(A__ )], 'scores': [ANY(A__ ), ANY(A__ )]}
                for i in range(1 )
            ] , )
        _lowerCamelCase : int =classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] )
        self.assertEqual(
            A__ , [
                {'sequence': ANY(A__ ), 'labels': [ANY(A__ ), ANY(A__ )], 'scores': [ANY(A__ ), ANY(A__ )]}
                for i in range(2 )
            ] , )
        # invalid inputs must raise rather than silently degrade
        with self.assertRaises(A__ ):
            classifier('' , candidate_labels='politics' )
        with self.assertRaises(A__ ):
            classifier(A__ , candidate_labels='politics' )
        with self.assertRaises(A__ ):
            classifier('Who are you voting for in 2020?' , candidate_labels='' )
        with self.assertRaises(A__ ):
            classifier('Who are you voting for in 2020?' , candidate_labels=A__ )
        with self.assertRaises(A__ ):
            classifier(
                'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , )
        with self.assertRaises(A__ ):
            classifier(
                'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=A__ , )
        self.run_entailment_id(A__ )

    # entailment_id must be derived from the model's label2id mapping.
    def lowerCamelCase ( self : Optional[int] , lowercase_ : Pipeline ) -> Tuple:
        """simple docstring"""
        _lowerCamelCase : Optional[int] =zero_shot_classifier.model.config
        _lowerCamelCase : Any =config.labelaid
        _lowerCamelCase : Dict =zero_shot_classifier.entailment_id
        # generic LABEL_* names carry no entailment info -> sentinel -1
        _lowerCamelCase : List[Any] ={"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        _lowerCamelCase : Tuple ={"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        _lowerCamelCase : str ={"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        _lowerCamelCase : List[str] ={"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        _lowerCamelCase : List[Any] =original_labelaid
        self.assertEqual(A__ , zero_shot_classifier.entailment_id )

    # Regression test for truncation of very long inputs (issue #13381).
    @require_torch
    def lowerCamelCase ( self : Any ) -> str:
        """simple docstring"""
        _lowerCamelCase : Any =pipeline(
            'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science'] )

    # Tiny-model smoke test (PyTorch): uniform scores from an untrained model.
    @require_torch
    def lowerCamelCase ( self : int ) -> Tuple:
        """simple docstring"""
        _lowerCamelCase : Any =pipeline(
            'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
        _lowerCamelCase : Dict =zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            } , )

    # Tiny-model smoke test (TensorFlow).
    @require_tf
    def lowerCamelCase ( self : Any ) -> List[Any]:
        """simple docstring"""
        _lowerCamelCase : List[str] =pipeline(
            'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
        _lowerCamelCase : Any =zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            } , )

    # Golden-score integration test against roberta-large-mnli (PyTorch).
    @slow
    @require_torch
    def lowerCamelCase ( self : int ) -> Dict:
        """simple docstring"""
        _lowerCamelCase : Tuple =pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
        _lowerCamelCase : Union[str, Any] =zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['politics', 'public health', 'science'],
                'scores': [0.976, 0.015, 0.009],
            } , )
        _lowerCamelCase : List[str] =zero_shot_classifier(
            'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
            ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
            ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
            ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
            ' machine translation tasks show these models to be superior in quality while being more parallelizable'
            ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
            ' English-to-German translation task, improving over the existing best results, including ensembles by'
            ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
            ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
            ' fraction of the training costs of the best models from the literature. We show that the Transformer'
            ' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=A__ , )
        self.assertEqual(
            nested_simplify(A__ ) , {
                'sequence': (
                    'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
                    ' networks in an encoder-decoder configuration. The best performing models also connect the'
                    ' encoder and decoder through an attention mechanism. We propose a new simple network'
                    ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
                    ' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
                    ' superior in quality while being more parallelizable and requiring significantly less time to'
                    ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
                    ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
                    ' English-to-French translation task, our model establishes a new single-model state-of-the-art'
                    ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
                    ' costs of the best models from the literature. We show that the Transformer generalizes well to'
                    ' other tasks by applying it successfully to English constituency parsing both with large and'
                    ' limited training data.'
                ),
                'labels': ['translation', 'machine learning', 'vision', 'statistics'],
                'scores': [0.817, 0.713, 0.018, 0.018],
            } , )

    # Golden-score integration test against roberta-large-mnli (TensorFlow).
    @slow
    @require_tf
    def lowerCamelCase ( self : Tuple ) -> Any:
        """simple docstring"""
        _lowerCamelCase : Any =pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' )
        _lowerCamelCase : int =zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['politics', 'public health', 'science'],
                'scores': [0.976, 0.015, 0.009],
            } , )
        _lowerCamelCase : List[Any] =zero_shot_classifier(
            'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
            ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
            ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
            ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
            ' machine translation tasks show these models to be superior in quality while being more parallelizable'
            ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
            ' English-to-German translation task, improving over the existing best results, including ensembles by'
            ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
            ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
            ' fraction of the training costs of the best models from the literature. We show that the Transformer'
            ' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=A__ , )
        self.assertEqual(
            nested_simplify(A__ ) , {
                'sequence': (
                    'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
                    ' networks in an encoder-decoder configuration. The best performing models also connect the'
                    ' encoder and decoder through an attention mechanism. We propose a new simple network'
                    ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
                    ' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
                    ' superior in quality while being more parallelizable and requiring significantly less time to'
                    ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
                    ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
                    ' English-to-French translation task, our model establishes a new single-model state-of-the-art'
                    ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
                    ' costs of the best models from the literature. We show that the Transformer generalizes well to'
                    ' other tasks by applying it successfully to English constituency parsing both with large and'
                    ' limited training data.'
                ),
                'labels': ['translation', 'machine learning', 'vision', 'statistics'],
                'scores': [0.817, 0.713, 0.018, 0.018],
            } , )
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
# Module-level handles. NOTE(review): names restored — `MODEL_CONFIG_CLASSES` is
# read on the next line and `logger` throughout SquadDataset, but both were bound
# to a mangled name. `MODEL_TYPES` is presumed (it feeds the `model_type` help
# text in the dataclass below) — confirm against the upstream transformers source.
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    NOTE(review): every field of the original was bound to one repeated mangled
    identifier (each assignment overwriting the previous, leaving a single field).
    Field names are restored from the `args.<attr>` reads in SquadDataset below;
    the class name from the annotation on SquadDataset.__init__.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    """Dataset split names.

    NOTE(review): class name and `Enum` base restored — SquadDataset below does
    `Split[mode]`, uses `Split.train` as a default and reads `mode.value`, all of
    which require this exact Enum. `Enum` is imported at the top of the file.
    """

    train = "train"
    dev = "dev"
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : SquadDataTrainingArguments
_SCREAMING_SNAKE_CASE : List[SquadFeatures]
_SCREAMING_SNAKE_CASE : Split
_SCREAMING_SNAKE_CASE : bool
def __init__( self : str , A__ : SquadDataTrainingArguments , A__ : PreTrainedTokenizer , A__ : Optional[int] = None , A__ : Union[str, Split] = Split.train , A__ : Optional[bool] = False , A__ : Optional[str] = None , A__ : Optional[str] = "pt" , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = args
snake_case_ : int = is_language_sensitive
snake_case_ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(A__ , A__ ):
try:
snake_case_ : List[str] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
snake_case_ : Tuple = mode
# Load data features from cache or dataset file
snake_case_ : Dict = "v2" if args.version_2_with_negative else "v1"
snake_case_ : List[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case_ : List[Any] = cached_features_file + ".lock"
with FileLock(A__ ):
if os.path.exists(A__ ) and not args.overwrite_cache:
snake_case_ : int = time.time()
snake_case_ : List[Any] = torch.load(A__ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
snake_case_ : Tuple = self.old_features["features"]
snake_case_ : List[str] = self.old_features.get("dataset" , A__ )
snake_case_ : Tuple = self.old_features.get("examples" , A__ )
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
" future run" )
else:
if mode == Split.dev:
snake_case_ : Tuple = self.processor.get_dev_examples(args.data_dir )
else:
snake_case_ : Tuple = self.processor.get_train_examples(args.data_dir )
snake_case_ ,snake_case_ : Optional[Any] = squad_convert_examples_to_features(
examples=self.examples , tokenizer=A__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=A__ , )
snake_case_ : Any = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , A__ , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__(self) -> int:
    """Number of cached SQuAD features (one windowed example per entry).

    Fixed nonsensical annotations: the method returns an ``int`` (length),
    not a ``Dict``.
    """
    return len(self.features)
def __getitem__(self, i: int) -> Dict[str, torch.Tensor]:
    """Build the model-input dict for the i-th cached feature.

    Always returns input_ids/attention_mask (plus token_type_ids unless the
    model type has none); XLNet/XLM additionally get cls_index/p_mask (and
    is_impossible for SQuAD v2, langs for language-sensitive runs); training
    mode adds start/end positions.

    Restored locals that had been collapsed to a single placeholder name
    (the dict below referenced never-bound names), bound the index parameter
    (body used ``i`` but the parameter was named ``A__``), and fixed the
    invalid dtype ``torch.intaa`` -> ``torch.int64``.
    """
    feature = self.features[i]

    input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
    attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
    token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
    cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
    p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
    is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

    inputs = {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "token_type_ids": token_type_ids,
    }

    # These model types do not use segment (token type) ids.
    if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
        del inputs["token_type_ids"]

    if self.args.model_type in ["xlnet", "xlm"]:
        inputs.update({"cls_index": cls_index, "p_mask": p_mask})
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible})
        if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

    if self.mode == Split.train:
        start_positions = torch.tensor(feature.start_position, dtype=torch.long)
        end_positions = torch.tensor(feature.end_position, dtype=torch.long)
        inputs.update({"start_positions": start_positions, "end_positions": end_positions})

    return inputs
| 666 | 0 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
# Restored name: the safety-checker class below logs through `logger`.
logger = logging.get_logger(__name__)
class __magic_name__ ( _UpperCamelCase ):
    """CLIP-based safety checker: flags NSFW and watermarked images and
    replaces flagged images with black images.

    NOTE(review): upstream this class is the DeepFloyd-IF safety checker;
    class-attribute, parameter, and method names below were restored from
    placeholder-collapsed residue (the original bound two different class
    attributes to the same placeholder name and declared four parameters
    with one shared name, which is a SyntaxError).
    """

    # Attributes read by the PreTrainedModel loading machinery.
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        # Two linear probes on the CLIP image embedding: NSFW and watermark.
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        """Return ``(images, nsfw_detected, watermark_detected)``; any image
        whose probe score exceeds its threshold is zeroed out in place."""
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed." )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape )

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed." )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape )

        return images, nsfw_detected, watermark_detected
| 163 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Restored names: the config classes below log through `logger`, and the
# archive map follows the package's <MODEL>_PRETRAINED_CONFIG_ARCHIVE_MAP
# convention (both had been collapsed to one rebound placeholder).
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig ( _UpperCamelCase ):
    """Configuration for the GIT vision encoder (CLIP-style ViT tower).

    Restored from placeholder-collapsed residue: the class name is required
    by the sibling config class, which instantiates ``GitVisionConfig``;
    the ``model_type`` attribute is required by the from_pretrained check
    below; the classmethod's tuple-unpack had collapsed both targets into a
    single name, leaving ``config_dict`` unbound.
    """

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size: int = 768,
        intermediate_size: int = 3072,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        num_channels: int = 3,
        image_size: int = 224,
        patch_size: int = 16,
        hidden_act: str = "quick_gelu",
        layer_norm_eps: float = 1e-5,
        attention_dropout: float = 0.0,
        initializer_range: float = 0.02,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the vision config, extracting the nested sub-config when the
        checkpoint is a full GIT config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig ( _UpperCamelCase ):
    """Configuration for the full GIT model (vision encoder + text decoder).

    Restored from placeholder-collapsed residue: the original declared every
    parameter with the same name (a SyntaxError) while the body referenced
    the intended names (``vision_config``, ``vocab_size``, ...). Defaults are
    preserved exactly in their original order.

    NOTE(review): this class depends on ``GitVisionConfig`` being defined at
    module level — verify the vision config class above carries that name.
    """

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 6,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 1024,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        pad_token_id: int = 0,
        position_embedding_type: str = "absolute",
        use_cache: bool = True,
        tie_word_embeddings: bool = False,
        bos_token_id: int = 101,
        eos_token_id: int = 102,
        num_image_with_embedding=None,
        **kwargs,
    ) -> None:
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values." )

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self) -> dict:
        """Serialize to a plain dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 666 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def jaro_winkler(stra: str, strb: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings in [0, 1].

    jaro_winkler = jaro + 0.1 * common_prefix_len(capped at 4) * (1 - jaro)

    Fixes restored from collapsed residue: the original declared both
    parameters with one name (a SyntaxError), used the same variable twice
    in the transposition and prefix ``zip`` unpacking (making the
    comparisons degenerate), and the ``__main__`` guard called
    ``jaro_winkler`` while the function carried a placeholder name.

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    """

    def get_matched_characters(_stra: str, _strb: str) -> str:
        # Characters of _stra found in _strb within the Jaro matching
        # window; each match in _strb is consumed (blanked out) so it
        # cannot be matched twice.
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, ch in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if ch in _strb[left:right]:
                matched.append(ch)
                _strb = f"{_strb[0:_strb.index(ch)]} {_strb[_strb.index(ch) + 1:]}"
        return "".join(matched)

    # matching characters, computed from both directions
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transpositions: matched characters appearing in a different order
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix, capped at 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
| 666 | 0 |
"""Lazy-import __init__ for the GIT model package."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Restored name: `_LazyModule` below consumes `_import_structure`; the
# residue had rebound one placeholder for every entry.
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    # Restored: install the lazy module into sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 665 | import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
# Restored names: every constant below had been rebound to one placeholder,
# while the conversion functions reference `logger`, `new_layer_name_dict`,
# `REMOTE_MODEL_PATHS`, `default_cache_dir` and `CACHE_DIR`.
logger = logging.get_logger(__name__)

set_seed(7_7_0)

# Mapping from suno/bark checkpoint layer-name fragments to the HF Bark
# layer names; consumed when fixing up the state dict.
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# Hub location of each suno/bark checkpoint variant.
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))  # NOTE(review): not referenced in this file
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type: str, use_small: bool = False) -> str:
    """Local cache path for the given Bark checkpoint variant.

    Restored from residue that declared two parameters with one name (a
    SyntaxError) and never read its only assignment; the name `_get_ckpt_path`
    is the one the `load_model` call site uses.
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_hub: str, file_name: str) -> None:
    """Download `file_name` from the given Hub repo into the local cache dir.

    Name restored from residue; `_load_model` calls `_download`.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_hub, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load a suno/bark checkpoint and return the converted HF Bark sub-model.

    Downloads the checkpoint into the cache if `ckpt_path` is missing.
    `model_type` selects the semantic/coarse/fine model, config and
    generation-config classes. Parameter and local names were restored from
    duplicate-name residue; the name `_load_model` is the one `load_model`
    calls.
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`." )
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack: old checkpoints only carry a single `vocab_size`
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config

    state_dict = checkpoint["model"]
    # fixup checkpoint: strip the torch.compile wrapper prefix and rename layers
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")

    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss" )
    model.eval()
    model.to(device)
    del checkpoint, state_dict

    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one Bark sub-model and save it to `pytorch_dump_folder_path`,
    sanity-checking the converted model's logits against the original
    suno/bark implementation.

    Parameter names restored from duplicate-name residue; the function name
    `load_model` is the one the __main__ block calls.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters" )

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 1_0

    if model_type in ["text", "coarse"]:
        vec = torch.randint(2_5_6, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(2_5_6, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape" )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal" )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    """Assemble the three converted sub-models plus the Encodec codec into one
    BarkModel checkpoint, save it under folder_path/append_text and push it.

    NOTE(review): parameter names and the BarkModel attribute names
    (`coarse_acoustics`, `fine_acoustics`, `codec_model`) were restored from
    duplicate-name residue based on the upstream conversion script — verify
    against the BarkModel definition.
    """
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig)
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
UpperCAmelCase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 666 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__A =logging.get_logger(__name__)
class _snake_case ( _UpperCamelCase ):
    """Image processor: resize -> center-crop -> rescale -> normalize, plus
    post-processing of semantic-segmentation logits.

    Restored from placeholder-collapsed residue: every method shared one
    name and every method declared all parameters under one name (a
    SyntaxError), while the bodies referenced the intended names
    (`self.resize`, `do_resize`, `crop_size`, ...). All default values are
    preserved exactly. NOTE(review): `model_input_names`, `preprocess` and
    `post_process_semantic_segmentation` follow the BaseImageProcessor
    convention — verify against callers.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="""crop_size""")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize so the shortest edge matches size["shortest_edge"], keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["""shortest_edge"""], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to size["height"] x size["width"]."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''')
        return center_crop(image, size=(size["""height"""], size["""width"""]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Standardize channels with the given mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured pipeline over one image or a batch; per-call
        arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="""crop_size""")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""")
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Turn model logits into per-image segmentation maps, optionally
        resized to `target_sizes`."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="""bilinear""", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation


# Preserved from a line that was fused onto the end of this class during
# extraction; the following module section relies on TYPE_CHECKING.
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING  # restored: this import had been fused into the previous section

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Restored name: `_LazyModule` below consumes `_import_structure`.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    # Restored: install the lazy module into sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Restored name: `_LazyModule` below consumes `_import_structure`.
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]

if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    # Restored: install the lazy module into sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 298 | from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase = logging.get_logger(__name__)
class snake_case__(BaseImageProcessor):
    r"""
    Image processor that resizes to a shortest edge, center-crops, rescales and
    normalizes images, and can post-process semantic-segmentation logits.

    NOTE(review): the original block could not compile (every parameter was named
    ``A__`` and all five methods shared one name). Parameter and method names were
    restored from the internal calls (``self.resize``, ``self.center_crop``, ...)
    and the standard HF image-processor template — confirm against upstream.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_55,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 2_56}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize channel-wise with the given mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured transform pipeline and return a BatchFeature of pixel values."""
        # Per-call overrides fall back to the values stored at construction time.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Turn model logits into per-image segmentation maps, optionally resized to target_sizes."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 666 | 0 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
# Module-level metric metadata referenced by the Metric class below; the three
# assignments previously shadowed a single name while _CITATION/_DESCRIPTION/
# _KWARGS_DESCRIPTION were referenced, which raised NameError.
_CITATION = '''\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'''
_DESCRIPTION = '''\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'''
_KWARGS_DESCRIPTION = '''\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\')  # \'sst2\' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\')  # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'''
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels.

    Assumes array-like inputs supporting elementwise ``==`` and ``.mean()``
    (numpy arrays); the metric below declares ``format="numpy"``.
    """
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Accuracy and F1 score for binary classification subsets (mrpc, qqp).

    NOTE(review): uses ``fa_score`` as bound by the file-level sklearn import;
    upstream sklearn's public name is ``f1_score`` — confirm the import line.
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlations for the stsb regression subset."""
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase(datasets.Metric):
    """GLUE metric: dispatches to the scorer matching the configured subset."""

    def _info(self):
        """Validate the configured subset and describe the metric's inputs."""
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]')
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # stsb is a regression task, so it takes float scores.
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        """Score ``predictions`` against ``references`` for the configured subset."""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]')
# Start of the next concatenated module (was fused onto the previous line).
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure consumed by _LazyModule: submodule name -> public names.
# Optional backends extend it below instead of clobbering it.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 0 |
import warnings

# Deprecation shim: importing this module only points users at the new location.
# (Fixed typo: the advertised name is `find_executable_batch_size`, matching the
# code snippet later in the same message.)
warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: "
    "`from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
from ...configuration_utils import PretrainedConfig

# Map of canonical TAPAS checkpoints to their hosted config files.
UpperCAmelCase = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}
class snake_case__(PretrainedConfig):
    """
    Configuration for TAPAS models: standard BERT hyperparameters plus the
    table-parsing fine-tuning knobs.

    NOTE(review): the original block could not compile (all parameters were named
    ``A__``); parameter names were restored from the attribute assignments in the
    body and default values kept in their original order.
    """

    model_type = "tapas"

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=10_24,
        type_vocab_sizes=[3, 2_56, 2_56, 2, 2_56, 2_56, 10],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        # JSON round-trips turn int keys into strings; normalize them back.
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 666 | 0 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class __UpperCamelCase:
    """
    Multiply two polynomials with the fast Fourier transform.

    Coefficients are given lowest-degree first; the product is available as
    ``self.product`` after construction.
    """

    def __init__(self, poly_a=None, poly_b=None):
        # Copy the inputs, defaulting None to the zero polynomial.
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove trailing zero coefficients (keep one term for the zero polynomial
        # so the pops below cannot empty the list).
        while self.polyA[-1] == 0 and len(self.polyA) > 1:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0 and len(self.polyB) > 1:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Pad with zeros so the transform length is a power of two large enough
        # to hold the product's len_A + len_B - 1 coefficients.
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A primitive c_max_length-th complex root of unity used for the transform.
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        """Iterative radix-2 DFT of polynomial 'A' or 'B'; returns the spectrum."""
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root ** next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Pointwise-multiply the two spectra, then invert the transform."""
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol])
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack; rounding to 8 decimals cancels float noise from the roots of unity.
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing 0's (keep at least one coefficient)
        while inverce_c[-1] == 0 and len(inverce_c) > 1:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        """Human-readable dump of both inputs and the product.

        (Fixed: ``enumerate`` yields (index, value), so the index/coefficient
        pair was previously swapped in the formatted terms.)
        """
        a = "A = " + " + ".join(
            f"""{coef}*x^{i}""" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(
            f"""{coef}*x^{i}""" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(
            f"""{coef}*x^{i}""" for i, coef in enumerate(self.product))
        return f"""{a}\n{b}\n{c}"""


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Beam-based builder producing a flat string feature, for the tests below.

    (Renamed from the corrupted placeholder: the test class instantiates it as
    ``DummyBeamDataset``; hook names follow the ``datasets`` builder API.)
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        # Imported lazily so the module loads without apache_beam installed.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam-based builder producing a nested sequence feature, for the tests below.

    (Renamed from the corrupted placeholder: the test class instantiates it as
    ``NestedBeamDataset``; hook names follow the ``datasets`` builder API.)
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        # Imported lazily so the module loads without apache_beam installed.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Three (index, example) pairs with a flat ``content`` field."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    """Three (index, example) pairs with a nested ``a.b`` sequence field."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class snake_case__(TestCase):
    """Integration tests for the Beam-based dataset builders above.

    NOTE(review): the original base ``_UpperCamelCase`` was undefined while
    ``TestCase`` was imported and unused — restored to ``TestCase``. Method
    bodies referenced undefined ``A__``/``expected_num_examples`` names; the
    intended locals were restored from the assertion targets.
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two output shards while delegating to the real writer.
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0",
                        f"{builder.name}-train-00000-of-00002.arrow")))
            # (Fixed: the second assertion previously re-checked shard 00000.)
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0",
                        f"{builder.name}-train-00001-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # Without a beam_runner, preparation must fail loudly.
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
| 666 | 0 |
import argparse

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt


def parse_bool(string):
    """Parse a literal 'True'/'False' CLI value into a bool; reject anything else."""
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(f"could not parse string as bool {string}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=5_12,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )
    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
import pytest

from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the deprecation-warning dedup set so every test sees the warning.

    NOTE(review): fixture name restored from the upstream datasets test suite —
    confirm it matches the parametrized test's signature.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())
@pytest.fixture
def mock_hfh(monkeypatch):
    """Patch the Hub client used by datasets.inspect with a static list of metrics."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())
@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Every metric entry point must emit the evaluate-migration warning.

    NOTE(review): the original definition was a SyntaxError (five parameters all
    named ``lowerCAmelCase_``); names restored from the body and the fixtures above.
    """
    if "tmp_path" in args:
        # Substitute the placeholder with the real per-test temporary directory.
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 666 | 0 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase =logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    """Copy HiFi-GAN generator weights from an original checkpoint dict into a SpeechTaHifiGan model.

    The original code assigned every tensor to a throwaway local, so nothing was
    actually loaded; the assignments now target the model parameters.
    Target attribute names (conv_pre / upsampler / resblocks / conv_post) follow
    transformers' SpeechTaHifiGan module layout — TODO confirm against the model class.
    """
    # Weight norm must be applied so weight_g / weight_v parameters exist.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    # Fold weight norm back into plain weights for the saved model.
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original SpeechT5 HiFi-GAN vocoder checkpoint to a transformers SpeechTaHifiGan.

    Args:
        checkpoint_path: path to the original torch checkpoint.
        stats_path: path to the stats.npy file holding [mean, scale] normalization rows.
        pytorch_dump_folder_path: output directory for the converted model.
        config_path: optional hf config.json to load; defaults to a fresh config.
        repo_id: optional hub repo to push the converted model to.
    """
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # stats row 0 = mean, row 1 = scale; register them on the model so they are serialized.
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point; `parser` and `args` were previously bound to a throwaway name,
    # making every subsequent reference a NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 208 | from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the leftmost index where `item` can be inserted into `sorted_collection`
    (restricted to [lo, hi)) while keeping it sorted.

    A negative `hi` means "to the end of the collection".
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2  # overflow-safe midpoint
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the rightmost index where `item` can be inserted into `sorted_collection`
    (restricted to [lo, hi)) while keeping it sorted.

    A negative `hi` means "to the end of the collection".
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2  # overflow-safe midpoint
        if sorted_collection[mid] <= item:  # <= : equal items go to the left of the cut
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` into `sorted_collection` in order, before any equal items."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` into `sorted_collection` in order, after any equal items."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; return an index of `item` or None if absent."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search implemented with the standard-library bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over sorted_collection[left:right+1]; return index or None."""
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    # Interactive demo; every statement previously rebound one throwaway name,
    # so the reads on the following lines were NameErrors.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 666 | 0 |
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text` (Knuth–Morris–Pratt).

    Runs in O(len(text) + len(pattern)) using the failure array to avoid
    re-examining matched characters.
    """
    # 1) Construct the failure array for the pattern.
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern.
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    """Compute the KMP failure array: failure[j] is the length of the longest
    proper prefix of pattern[:j+1] that is also a suffix of it."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # Fall back to the next shorter border and retry without advancing j.
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Self-tests; the original guard body was unindented (IndentationError) and
    # every variable was bound to one reused throwaway name.
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 464 | import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case__(DiffusionPipeline):
    """Unconditional latent-diffusion image generation pipeline (VQ-VAE + UNet + DDIM).

    The original `__init__`/`__call__` signatures repeated one parameter name,
    which is a SyntaxError in Python; parameter names are restored.
    """

    def __init__(self, vqvae: VQModel, unet: UNetaDModel, scheduler: DDIMScheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Sample `batch_size` images; returns PIL images unless `output_type` differs."""
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            # previously assigned to a local, so eta was silently dropped
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 666 | 0 |
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes strictly below `max_number` (sieve of Eratosthenes).

    Fixes: the parameter name did not match the body (NameError), and the inner
    sieve loop used the limit as the range step instead of the prime `i`.
    """
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Mark multiples of i starting at i*i; smaller multiples were already marked.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(max_number: int = 10**8) -> int:
    """Count composites below `max_number` with exactly two (not necessarily
    distinct) prime factors, via a two-pointer sweep over primes < max_number/2."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        # Shrink right until the product fits under the limit.
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        # All pairs (left, left..right) now produce valid semiprimes.
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 163 | from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits with the Chudnovsky algorithm.

    Fixes: the type check compared the argument against itself, and the Decimal
    context precision was assigned to a throwaway local instead of being set.

    Raises:
        TypeError: if precision is not an int.
        ValueError: if precision < 1.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    # Set Decimal working precision; each Chudnovsky term yields ~14 digits.
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # Drop the final (possibly rounded) digit.
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 666 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

# Known ImageGPT checkpoints; archive URLs are intentionally empty (configs live on the Hub).
# Previously both bindings used one name, so the logger was immediately overwritten.
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class _a(PretrainedConfig):
    """ImageGPT model configuration.

    Fixes: `__init__` repeated one parameter name (SyntaxError) and the three
    class attributes shared the name `a_`, shadowing each other; both are
    required by the PretrainedConfig machinery.
    """

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 color clusters + 1 start-of-sequence token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    """ONNX export configuration for ImageGPT.

    Renamed from `_a`, which shadowed the config class above; method names are
    restored to the `inputs` / `generate_dummy_inputs` contract that the
    OnnxConfig machinery looks up (both methods previously shared one name).
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Build dummy pixel inputs by running generated images through the preprocessor."""
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number to contain `n` digits.

    Fixes: the two running Fibonacci values were unpacked into a single reused
    name, so the subsequent reads were NameErrors.
    """
    fa, fb = 1, 1
    index = 2
    while True:
        i = 0
        fc = fa + fb
        fa, fb = fb, fc
        index += 1
        # Count digits by iterating the decimal string.
        for _ in str(fc):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 666 | 0 |
'''simple docstring'''
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursive solution to the rod-cutting problem. O(2^n).

    Args:
        n: rod length.
        prices: prices[i-1] is the price of a piece of length i.
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        # Best of: sell a first piece of length i, then optimally cut the rest.
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue
def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) rod cutting. O(n^2)."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate the revenue for a rod of length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : list , lowerCamelCase : list):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
A_ : Any = float("""-inf""")
for i in range(1 , n + 1):
A_ : Union[str, Any] = max(
lowerCAmelCase_ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowerCAmelCase_ , lowerCAmelCase_) , )
A_ : List[str] = max_revenue
return max_rev[n]
def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) rod cutting. O(n^2)."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : list):
if n < 0:
A_ : List[str] = F'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(lowerCAmelCase_)
if n > len(lowerCAmelCase_):
A_ : Tuple = (
"Each integral piece of rod must have a corresponding price. "
F'Got n = {n} but length of prices = {len(lowerCAmelCase_)}'
)
raise ValueError(lowerCAmelCase_)
def main():
    """Cross-check all three rod-cutting implementations against a known answer."""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
| 665 | from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left:right+1].

    Raises:
        ValueError: if nums is empty.
        IndexError: if left/right fall outside the valid index range.
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 666 | 0 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums, left, right):
    """Divide-and-conquer maximum of nums[left:right+1].

    NOTE(review): duplicate of the find_max defined earlier in this file;
    consider deduplicating.

    Raises:
        ValueError: if nums is empty.
        IndexError: if left/right fall outside the valid index range.
    """
    if len(nums) == 0:
        raise ValueError("""find_max() arg is an empty sequence""")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("""list index out of range""")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 407 | import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

# The tokenizer class below references these names; previously all four bindings
# reused one throwaway name, so only the last assignment survived.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class snake_case__(PreTrainedTokenizerFast):
    """Fast RoBERTa tokenizer (backed by HuggingFace tokenizers).

    Fixes: `__init__` repeated one parameter name (SyntaxError), all overriding
    methods shared a single munged name (shadowing each other), and the
    `@mask_token.setter` referenced an undefined `mask_token` property name.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the pre-tokenizer if its add_prefix_space setting disagrees.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word: include preceding space (lstrip).
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """<s> A </s> (</s> B </s>) — RoBERTa special-token layout."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_pair + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """RoBERTa does not use token type ids: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]
| 666 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class lowerCAmelCase__(PipelineTool):
    """Agent tool that transcribes an audio input to text with Whisper.

    Fixes: the class attributes all shared one name and the three methods all
    shared one name, destroying the PipelineTool attribute/method contract
    (default_checkpoint, pre_processor_class, encode/forward/decode, ...).
    """

    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        # Convert raw audio to Whisper input features.
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        # Single-item batch: return the first decoded string.
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
| 298 | from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case__(unittest.TestCase):
    """Integration test for TF-XLM-RoBERTa base.

    Fixes: the test method was not named test_* (never collected by unittest),
    and `tf.intaa` / `tf.floataa` are nonexistent TensorFlow attributes.
    """

    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 666 | 0 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowerCamelCase : Dict = TypeVar('''T''')
class lowerCAmelCase(Generic[T]):
    """Graph stored as an adjacency list (dict of vertex -> list of neighbours).

    Fixes: `__init__` wrote its state to throwaway locals (so `self.adj_list`
    never existed), and `add_edge` repeated one parameter name (SyntaxError)
    and discarded every new adjacency-list entry into locals.
    """

    def __init__(self, directed: bool = True) -> None:
        """Create an empty graph; `directed=False` makes edges bidirectional."""
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Add an edge, creating missing vertices as needed; returns self."""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create both entries pointing at each other.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if neither vertex exists, create the source entry with the destination as
            # its first neighbour, and an empty entry for the destination.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)


from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-level logger (transformers-style helper, not stdlib logging.getLogger).
# NOTE(review): the name is obfuscated; code elsewhere may expect `logger` — verify.
UpperCAmelCase = logging.get_logger(__name__)
# PIL is an optional dependency; only import it when the vision extras are installed.
if is_vision_available():
    import PIL
class snake_case__(_UpperCamelCase):
    r"""
    CLIP-style image processor: optionally convert to RGB, resize the shortest
    edge, center crop, rescale to [0, 1], and normalize with the OpenAI CLIP
    mean/std, returning a `BatchFeature` of "pixel_values".

    NOTE(review): the class name, base name and the `_SCREAMING_SNAKE_CASE`
    attribute (upstream: `model_input_names`) are obfuscated; they are kept
    unchanged because external callers are not visible from this file. The
    original method signatures repeated the parameter name `A__`, which is a
    SyntaxError — real names are restored from the assignment right-hand sides
    and call sites.
    """

    _SCREAMING_SNAKE_CASE: List[str] = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 2_24}
        # Shortest-edge sizes must not be squared up into (h, w).
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` so its shortest edge equals size["shortest_edge"], keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here resolves to the module-level function imported from image_transforms.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Per-channel normalize: (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one image or a batch into a `BatchFeature` of "pixel_values".

        Per-call arguments override the instance defaults set in `__init__`.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 666 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
# Module-level logger, referenced as `logger` by handle_metrics() and main() below.
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    Restored name: the class is consumed as `ModelArguments` by HfArgumentParser in main().
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    # Help text typo fixed ("tp" -> "to").
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Restored name: the class is consumed as `DataTrainingArguments` by HfArgumentParser in main().
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log metrics for a split and save them to `<output_dir>/<split>_results.json`.

    Args:
        split: one of "train", "val" or "test".
        metrics: metrics dict produced by the trainer.
        output_dir: directory the json results file is written to.
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f" {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    """Fine-tune and/or evaluate a seq2seq model.

    Parses ModelArguments, DataTrainingArguments and SeqaSeqTrainingArguments
    from the command line (or a single .json file), builds the
    config/tokenizer/model and datasets, then runs train / eval / predict as
    requested.

    Returns:
        dict: all collected metrics, keyed by split-prefixed metric names.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,  # NOTE(review): obfuscated source read `fpaa`; TrainingArguments exposes `fp16`
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    # Forward a few regularization knobs from the training args into the model config.
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = SeqaSeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=SeqaSeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    """Entry point for TPU multiprocessing; `index` is the spawned process ordinal."""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    # Script entry point: run the full fine-tuning / evaluation pipeline.
    main()
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return every knight move from `position` that stays on an n x n board."""
    y, x = position
    # The eight L-shaped knight moves.
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for candidate in positions:
        y_test, x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Return True when every square has been visited (no zeros left on the board)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking helper: try to extend a partial tour of length `curr` from `pos`."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            # Undo the move before trying the next candidate (backtrack).
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board.

    Returns:
        A board whose entries give the 1-based order in which squares are visited.

    Raises:
        ValueError: if no open tour exists for this board size.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    # Typo fixed in the error message ("Kight" -> "Knight").
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 666 | 0 |
"""simple docstring"""
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence (2, 3, 7, 43, ...).

    Each term is one more than the product of all previous terms, computed
    recursively via a(n) = a(n-1) * (a(n-1) - 1) + 1.

    Args:
        number: 1-based index into the sequence; must be a positive integer.

    Returns:
        The `number`-th Sylvester number.

    Raises:
        AssertionError: if `number` is not an int.
        ValueError: if `number` < 1.
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
    # Demo: print the 8th Sylvester number when run as a script.
    print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
from ...configuration_utils import PretrainedConfig
class snake_case__(_UpperCamelCase):
    """Configuration for a BertGeneration model ("bert-generation").

    NOTE(review): the class name, the base name and the `_SCREAMING_SNAKE_CASE`
    attribute (upstream: `model_type`) are obfuscated and kept unchanged since
    external callers are not visible. The original `__init__` repeated the
    parameter name `A__`, a SyntaxError — real names restored from the
    assignment right-hand sides.
    """

    _SCREAMING_SNAKE_CASE = "bert-generation"

    def __init__(
        self,
        vocab_size=5_03_58,
        hidden_size=10_24,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=40_96,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        # Register the special-token ids (and remaining kwargs) with the base config.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 666 | 0 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    """Trivial map function used by the parallel-backend tests below."""
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    """parallel_backend registers its name and rejects unknown backends."""
    with parallel_backend('spark'):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend('unsupported backend'):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend('unsupported backend'):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc', [2, -1])
def test_parallel_backend_map_nested(num_proc):
    """map_nested applies add_one through lists and (nested) dicts under the spark backend."""
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend('spark'):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
import math
def sieve(n):
    """Segmented sieve of Eratosthenes: return all primes <= n, in order.

    First sieves the base range [2, sqrt(n)], then marks composites in
    successive segments of width ~sqrt(n) using those base primes, keeping
    memory proportional to sqrt(n).
    """
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    # Classic sieve over the base range [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # Sieve each segment [low, high] with the base primes.
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # Smallest multiple of `each` that is >= low.
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
# Demo: print all primes up to one million using the segmented sieve.
print(sieve(1_0**6))
| 666 | 0 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    """Reads JSON / JSON-Lines files into a `Dataset` (or a streaming `IterableDataset`).

    NOTE(review): both this reader and the writer below were obfuscated to the
    name `A`, so this class was shadowed and unreachable; restoring its
    conventional name cannot break existing references to `A`. The base class
    is restored to `AbstractDatasetReader` (imported above).
    """

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Optional top-level JSON field under which the records are nested.
        self.field = field
        # Normalize the input into a {split_name: paths} mapping for the builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset: streaming view, or prepared on disk then loaded."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class A:
    """Writes a `Dataset` to JSON (JSON-Lines by default) via pandas `to_json`.

    NOTE(review): obfuscated name kept — callers may rely on `A` resolving to
    this class; upstream calls it `JsonDatasetWriter`. The original methods
    repeated parameter names (SyntaxError) and collapsed locals; real names are
    restored from usage sites.
    """

    def __init__(self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        """Serialize the dataset and return the number of bytes written."""
        # `path_or_buf` must not leak into pandas' to_json kwargs; drop it if present.
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        """Convert one slice of the dataset to encoded JSON bytes (picklable for Pool.imap)."""
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        """Write the dataset batch by batch to `file_obj`; returns total bytes written."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)
        return written
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
# torch is an optional dependency; import it only when installed.
if is_torch_available():
    import torch
# Vision extras (PIL + the image processor under test) are likewise optional.
if is_vision_available():
    from PIL import Image
    from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Helper holding image-processor kwargs and computing expected output sizes.

    Renamed from the obfuscated ``snake_case__`` to the name the test class
    below actually instantiates. The mangled original bound every value to a
    throwaway local (so no attribute was ever set), unpacked ``image.size``
    into a single repeated name, and used ``lambda A__: item[0]`` (NameError);
    all of those are repaired here.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # ConditionalDetr default: shortest edge -> 18, longest edge capped at 1333.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Return the (height, width) the processor should resize to.

        Non-batched: aspect-ratio-preserving resize of the first image so its
        shortest edge equals ``size["shortest_edge"]``. Batched: the per-image
        expectations, reduced to the max height and max width (padding shape).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                width, height = image.size
            else:
                # assumes channels-first arrays/tensors: (C, H, W) — matches prepare_image_inputs
                height, width = image.shape[1], image.shape[2]
            shortest_edge = self.size["shortest_edge"]
            if width < height:
                expected_height = int(shortest_edge * height / width)
                expected_width = shortest_edge
            elif width > height:
                expected_height = shortest_edge
                expected_width = int(shortest_edge * width / height)
            else:
                expected_height = shortest_edge
                expected_width = shortest_edge
        else:
            expected_values = [self.get_expected_values([image]) for image in image_inputs]
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( _UpperCamelCase , unittest.TestCase ):
    """ConditionalDetr image-processor tests (PIL / numpy / torch inputs, COCO targets).

    NOTE(review): identifier obfuscation left this class non-functional as-is:
    every method is named ``UpperCAmelCase__`` (each definition shadows the
    previous one, and unittest discovers none of them), results are bound to
    throwaway ``snake_case_`` locals where ``self.`` attributes were clearly
    intended, and many bodies reference undefined placeholders (``A__``,
    ``image_processor``, ``image_processing``, ``encoded_images``). Code is
    kept as-is; only documentation was added and invalid tuple-target type
    annotations (``a, b : T = ...`` is a SyntaxError) were removed.
    """
    _SCREAMING_SNAKE_CASE : Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None

    def UpperCAmelCase__ ( self : Tuple ) -> Dict:
        """setUp-style hook: build the shared tester helper."""
        snake_case_ : List[str] = ConditionalDetrImageProcessingTester(self )

    @property
    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        """Kwargs dict used to construct processors in the tests below."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCAmelCase__ ( self : Any ) -> Tuple:
        """The processor exposes the expected configuration attributes."""
        snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A__ , "image_mean" ) )
        self.assertTrue(hasattr(A__ , "image_std" ) )
        self.assertTrue(hasattr(A__ , "do_normalize" ) )
        self.assertTrue(hasattr(A__ , "do_resize" ) )
        self.assertTrue(hasattr(A__ , "size" ) )

    def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
        """`from_dict` honors defaults and size/max_size overrides."""
        snake_case_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
        self.assertEqual(image_processor.do_pad , A__ )
        snake_case_ : Optional[int] = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A__ )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , A__ )

    def UpperCAmelCase__ ( self : str ) -> Optional[int]:
        """Intentionally empty (placeholder overridden from the mixin)."""
        pass

    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        """PIL inputs: output pixel_values have the expected padded shapes."""
        snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , Image.Image )
        # Test not batched input
        snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ ,snake_case_ = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        snake_case_ : int = image_processing(A__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def UpperCAmelCase__ ( self : int ) -> Any:
        """numpy inputs: output pixel_values have the expected padded shapes."""
        snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , np.ndarray )
        # Test not batched input
        snake_case_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ : Optional[int] = image_processing(A__ , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def UpperCAmelCase__ ( self : Tuple ) -> str:
        """torch inputs: output pixel_values have the expected padded shapes."""
        snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , torch.Tensor )
        # Test not batched input
        snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ : Any = image_processing(A__ , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
        """Integration: COCO detection annotations reproduce the reference encoding."""
        snake_case_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            snake_case_ : Optional[Any] = json.loads(f.read() )
        snake_case_ : int = {"image_id": 3_97_69, "annotations": target}
        # encode them
        snake_case_ : Optional[int] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
        snake_case_ : Any = image_processing(images=A__ , annotations=A__ , return_tensors="pt" )
        # verify pixel values
        snake_case_ : List[Any] = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , A__ )
        snake_case_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
        # verify area
        snake_case_ : Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
        # verify boxes
        snake_case_ : Any = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
        snake_case_ : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
        # verify image_id
        snake_case_ : List[Any] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
        # verify is_crowd
        snake_case_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
        # verify class_labels
        snake_case_ : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
        # verify orig_size
        snake_case_ : Any = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
        # verify size
        snake_case_ : List[str] = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )

    @slow
    def UpperCAmelCase__ ( self : int ) -> str:
        """Integration: COCO panoptic annotations reproduce the reference encoding."""
        snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            snake_case_ : Any = json.loads(f.read() )
        snake_case_ : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
        snake_case_ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        snake_case_ : Union[str, Any] = ConditionalDetrImageProcessor(format="coco_panoptic" )
        snake_case_ : str = image_processing(images=A__ , annotations=A__ , masks_path=A__ , return_tensors="pt" )
        # verify pixel values
        snake_case_ : int = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , A__ )
        snake_case_ : str = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
        # verify area
        snake_case_ : Optional[int] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
        # verify boxes
        snake_case_ : str = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
        snake_case_ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
        # verify image_id
        snake_case_ : List[str] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
        # verify is_crowd
        snake_case_ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
        # verify class_labels
        snake_case_ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
        # verify masks
        snake_case_ : Union[str, Any] = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A__ )
        # verify orig_size
        snake_case_ : Dict = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
        # verify size
        snake_case_ : str = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )
| 666 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class A ( _UpperCamelCase , unittest.TestCase ):
    """Flax AutoencoderKL tests driven by the common Flax model-tester mixin.

    NOTE(review): both methods below carry the same obfuscated name
    ``lowerCamelCase``, so the second definition shadows the ``@property`` —
    presumably these were ``dummy_input`` and the mixin's
    ``prepare_init_args_and_inputs_for_common``; confirm against the mixin.
    Bodies also bind to throwaway ``_lowerCamelCase`` locals and then read
    undefined names (``batch_size``, ``image``, ``init_dict`` …).
    """
    UpperCamelCase__ : Tuple =FlaxAutoencoderKL

    @property
    def lowerCamelCase ( self : List[Any] ) -> Dict:
        """Random (batch, channels, 32, 32) sample plus a PRNG key."""
        _lowerCamelCase : str =4
        _lowerCamelCase : Any =3
        _lowerCamelCase : List[Any] =(32, 32)
        _lowerCamelCase : Optional[int] =jax.random.PRNGKey(0 )
        _lowerCamelCase : List[str] =jax.random.uniform(A__ , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}

    def lowerCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
        """Init kwargs for a tiny VAE plus the matching dummy inputs."""
        _lowerCamelCase : Optional[int] ={
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        _lowerCamelCase : int =self.dummy_input
        return init_dict, inputs_dict
| 464 | import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class snake_case__ :
    """Command-line arguments controlling SQuAD data preparation.

    NOTE(review): every field was obfuscated to the single name
    ``_SCREAMING_SNAKE_CASE``, so each annotation overrides the previous one
    and only one dataclass field survives; ``_UpperCamelCase`` defaults are
    unresolved placeholders. The per-field comments below record the intended
    field names, inferred from the ``help`` texts and the attribute reads in
    the dataset class (``args.max_seq_length`` etc.) — confirm before use.
    """

    # intended: model_type
    _SCREAMING_SNAKE_CASE : str = field(
        default=_UpperCamelCase , metadata={"help": "Model type selected in the list: " + ", ".join(_UpperCamelCase )} )
    # intended: data_dir
    _SCREAMING_SNAKE_CASE : str = field(
        default=_UpperCamelCase , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
    # intended: max_seq_length
    _SCREAMING_SNAKE_CASE : int = field(
        default=1_2_8 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    # intended: doc_stride
    _SCREAMING_SNAKE_CASE : int = field(
        default=1_2_8 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    # intended: max_query_length
    _SCREAMING_SNAKE_CASE : int = field(
        default=6_4 , metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    # intended: max_answer_length
    _SCREAMING_SNAKE_CASE : int = field(
        default=3_0 , metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    # intended: overwrite_cache
    _SCREAMING_SNAKE_CASE : bool = field(
        default=_UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    # intended: version_2_with_negative
    _SCREAMING_SNAKE_CASE : bool = field(
        default=_UpperCamelCase , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
    # intended: null_score_diff_threshold
    _SCREAMING_SNAKE_CASE : float = field(
        default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    # intended: n_best_size
    _SCREAMING_SNAKE_CASE : int = field(
        default=2_0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    # intended: lang_id
    _SCREAMING_SNAKE_CASE : int = field(
        default=0 , metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    # intended: threads
    _SCREAMING_SNAKE_CASE : int = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class snake_case__ ( _UpperCamelCase ):
    """Dataset split identifiers (train / dev).

    NOTE(review): both members were obfuscated to one name, so the second
    annotation overrides the first; the base ``_UpperCamelCase`` is an
    unresolved placeholder (presumably ``Enum`` — the dataset class indexes
    it as ``Split[mode]``).
    """
    # intended: train = "train"
    _SCREAMING_SNAKE_CASE : Tuple = "train"
    # intended: dev = "dev"
    _SCREAMING_SNAKE_CASE : Any = "dev"
class snake_case__ ( _UpperCamelCase ):
    """PyTorch Dataset for SQuAD that tokenizes examples and caches features on disk.

    NOTE(review): obfuscation left this class non-runnable as written:
    ``__init__`` declares several parameters all named ``A__`` (duplicate
    argument names are a SyntaxError), results are bound to throwaway
    ``snake_case_`` locals where ``self.`` attributes (``self.args``,
    ``self.processor``, ``self.features``, ``self.dataset`` …) were clearly
    intended, and names such as ``args``/``mode``/``tokenizer``/``cache_dir``
    are read without being bound. Code kept byte-for-byte; docs only.
    """

    # expected members (annotations only; all collapsed to one obfuscated name)
    _SCREAMING_SNAKE_CASE : SquadDataTrainingArguments
    _SCREAMING_SNAKE_CASE : List[SquadFeatures]
    _SCREAMING_SNAKE_CASE : Split
    _SCREAMING_SNAKE_CASE : bool

    def __init__( self : str , A__ : SquadDataTrainingArguments , A__ : PreTrainedTokenizer , A__ : Optional[int] = None , A__ : Union[str, Split] = Split.train , A__ : Optional[bool] = False , A__ : Optional[str] = None , A__ : Optional[str] = "pt" , ) -> Optional[Any]:
        """Load features from the on-disk cache if present, else build and cache them."""
        snake_case_ : Tuple = args
        snake_case_ : int = is_language_sensitive
        snake_case_ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(A__ , A__ ):
            try:
                snake_case_ : List[str] = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name" )
        snake_case_ : Tuple = mode
        # Load data features from cache or dataset file
        snake_case_ : Dict = "v2" if args.version_2_with_negative else "v1"
        snake_case_ : List[Any] = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        snake_case_ : List[Any] = cached_features_file + ".lock"
        with FileLock(A__ ):
            if os.path.exists(A__ ) and not args.overwrite_cache:
                snake_case_ : int = time.time()
                snake_case_ : List[Any] = torch.load(A__ )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                snake_case_ : Tuple = self.old_features["features"]
                snake_case_ : List[str] = self.old_features.get("dataset" , A__ )
                snake_case_ : Tuple = self.old_features.get("examples" , A__ )
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run" )
            else:
                if mode == Split.dev:
                    snake_case_ : Tuple = self.processor.get_dev_examples(args.data_dir )
                else:
                    snake_case_ : Tuple = self.processor.get_train_examples(args.data_dir )
                snake_case_ ,snake_case_ = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=A__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=A__ , )
                snake_case_ : Any = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples} , A__ , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )

    def __len__( self : str ) -> Dict:
        """Number of cached features."""
        return len(self.features )

    def __getitem__( self : Optional[int] , A__ : Optional[int] ) -> Dict[str, torch.Tensor]:
        """Convert the i-th feature to a dict of model-input tensors."""
        snake_case_ : Any = self.features[i]
        snake_case_ : Optional[int] = torch.tensor(feature.input_ids , dtype=torch.long )
        snake_case_ : Union[str, Any] = torch.tensor(feature.attention_mask , dtype=torch.long )
        snake_case_ : List[Any] = torch.tensor(feature.token_type_ids , dtype=torch.long )
        snake_case_ : List[Any] = torch.tensor(feature.cls_index , dtype=torch.long )
        snake_case_ : str = torch.tensor(feature.p_mask , dtype=torch.float )
        snake_case_ : str = torch.tensor(feature.is_impossible , dtype=torch.float )
        snake_case_ : Optional[int] = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        # models without token_type_ids / with language-model extras
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible} )
        if self.is_language_sensitive:
            # NOTE(review): ``torch.intaa`` looks like a mangled dtype (presumably int64) — confirm
            inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
        if self.mode == Split.train:
            snake_case_ : Any = torch.tensor(feature.start_position , dtype=torch.long )
            snake_case_ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
        return inputs
| 666 | 0 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> Tuple:
    """Fixture: clear the set of already-emitted dataset deprecation warnings.

    NOTE(review): the parameter is presumably pytest's ``monkeypatch`` — the
    body calls ``monkeypatch`` directly, which the mangled name no longer
    binds; confirm the original signature.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> Dict:
    """Fixture: replace the huggingface_hub client with a mock exposing fake metrics.

    NOTE(review): heavily mangled — both local classes share the name
    ``__magic_name__`` (the mock metric class was presumably ``MetricMock``
    and the hub mock ``HfhMock``, the names the body actually uses), the
    parameter is presumably ``monkeypatch``, and ``metric_id`` is read
    without being bound. Code kept verbatim.
    """
    class __magic_name__ :
        def __init__( self : Any , snake_case_ : List[str] ):
            __snake_case = metric_id
    class __magic_name__ :
        # list of fake metric entries returned by the mocked hub
        _SCREAMING_SNAKE_CASE : List[str] = [MetricMock(_UpperCamelCase ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
        def lowerCAmelCase ( self : Any ):
            return self._metrics
    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
    """Each deprecated metric entry point should emit the evaluate-migration warning.

    NOTE(review): the five parameters were all mangled to the same name (a
    SyntaxError); the body reads ``func``/``args``/``tmp_path``, which pytest
    injects by parameter name, plus undefined ``lowerCAmelCase_`` placeholders
    (the warning class and the call arguments). Confirm the original signature.
    """
    if "tmp_path" in args:
        __snake_case = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(lowerCAmelCase_ , match="https://huggingface.co/docs/evaluate" ):
        func(*lowerCAmelCase_ )
| 163 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class snake_case__ ( _UpperCamelCase ):
    """Configuration of GIT's CLIP-style vision encoder (model type ``git_vision_model``).

    NOTE(review): ``__init__`` declares all parameters as ``A__`` (duplicate
    argument names are a SyntaxError) and stores hyper-parameters in throwaway
    locals instead of ``self.`` attributes. Code kept verbatim; docs only, and
    the invalid tuple-target annotation in the classmethod was removed.
    """
    _SCREAMING_SNAKE_CASE : Dict = "git_vision_model"

    def __init__( self : int , A__ : Union[str, Any]=7_68 , A__ : List[Any]=30_72 , A__ : Tuple=12 , A__ : Optional[Any]=12 , A__ : Optional[int]=3 , A__ : List[str]=2_24 , A__ : Dict=16 , A__ : int="quick_gelu" , A__ : Any=1E-5 , A__ : Tuple=0.0 , A__ : Optional[int]=0.02 , **A__ : List[str] , ) -> Optional[int]:
        """Store encoder hyper-parameters (hidden size, layers, patching, …)."""
        super().__init__(**A__ )
        snake_case_ : Optional[Any] = hidden_size
        snake_case_ : str = intermediate_size
        snake_case_ : Optional[Any] = num_hidden_layers
        snake_case_ : int = num_attention_heads
        snake_case_ : Optional[int] = num_channels
        snake_case_ : Union[str, Any] = patch_size
        snake_case_ : List[str] = image_size
        snake_case_ : List[Any] = initializer_range
        snake_case_ : Any = attention_dropout
        snake_case_ : Any = layer_norm_eps
        snake_case_ : int = hidden_act

    @classmethod
    def UpperCAmelCase__ ( cls : List[Any] , A__ : Union[str, os.PathLike] , **A__ : Optional[int] ) -> "PretrainedConfig":
        """Load the vision sub-config, unwrapping a full GIT config when given one."""
        cls._set_token_in_kwargs(A__ )
        snake_case_ ,snake_case_ = cls.get_config_dict(A__ , **A__ )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type" ) == "git":
            snake_case_ : Any = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(A__ , **A__ )
class snake_case__ ( _UpperCamelCase ):
    """Top-level GIT model configuration (model type ``git``).

    NOTE(review): shares the obfuscated name ``snake_case__`` with the vision
    config above (which it shadows), ``__init__`` declares all parameters as
    ``A__`` (duplicate argument names — a SyntaxError), references
    ``GitVisionConfig`` which no longer exists under that name, and binds
    hyper-parameters to throwaway locals instead of ``self.`` attributes.
    Code kept verbatim; docs only.
    """
    _SCREAMING_SNAKE_CASE : Optional[Any] = "git"

    def __init__( self : Any , A__ : List[str]=None , A__ : List[str]=3_05_22 , A__ : Tuple=7_68 , A__ : Tuple=6 , A__ : str=12 , A__ : Any=30_72 , A__ : List[str]="gelu" , A__ : int=0.1 , A__ : Dict=0.1 , A__ : Any=10_24 , A__ : Optional[Any]=0.02 , A__ : Optional[Any]=1E-12 , A__ : Dict=0 , A__ : Any="absolute" , A__ : Tuple=True , A__ : Any=False , A__ : Tuple=1_01 , A__ : Tuple=1_02 , A__ : List[Any]=None , **A__ : List[str] , ) -> int:
        """Store text-decoder hyper-parameters and the nested vision config."""
        super().__init__(bos_token_id=A__ , eos_token_id=A__ , pad_token_id=A__ , **A__ )
        if vision_config is None:
            snake_case_ : int = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
        snake_case_ : str = GitVisionConfig(**A__ )
        snake_case_ : int = vocab_size
        snake_case_ : List[Any] = hidden_size
        snake_case_ : Tuple = num_hidden_layers
        snake_case_ : List[Any] = num_attention_heads
        snake_case_ : Any = hidden_act
        snake_case_ : Dict = intermediate_size
        snake_case_ : Any = hidden_dropout_prob
        snake_case_ : Any = attention_probs_dropout_prob
        snake_case_ : Union[str, Any] = max_position_embeddings
        snake_case_ : List[str] = initializer_range
        snake_case_ : List[str] = layer_norm_eps
        snake_case_ : Any = position_embedding_type
        snake_case_ : Union[str, Any] = use_cache
        snake_case_ : str = tie_word_embeddings
        snake_case_ : List[Any] = num_image_with_embedding
        snake_case_ : Dict = bos_token_id
        snake_case_ : int = eos_token_id

    def UpperCAmelCase__ ( self : Any ) -> int:
        """Serialize to a dict, expanding the nested vision config."""
        snake_case_ : Tuple = copy.deepcopy(self.__dict__ )
        snake_case_ : Optional[int] = self.vision_config.to_dict()
        snake_case_ : Tuple = self.__class__.model_type
        return output
| 666 | 0 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """Split a scikit-learn dataset bunch/dict into ``(features, targets)``.

    Renamed from the obfuscated ``snake_case`` to ``data_handling``, the name
    ``main`` below actually calls, and the parameter is named ``data`` so the
    body's reads resolve (the mangled original took ``_a`` but read ``data``).

    Args:
        data: mapping with "data" (feature matrix) and "target" (labels) keys.

    Returns:
        The ``(data["data"], data["target"])`` pair.
    """
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray) -> "XGBClassifier":
    """Fit an XGBoost classifier on the given features and targets.

    Renamed from the obfuscated ``snake_case`` to ``xgboost``, the name
    ``main`` below actually calls; the duplicate ``_a`` parameter names
    (a SyntaxError) are replaced with descriptive ones.

    Args:
        features: training feature matrix.
        target: training labels.

    Returns:
        The fitted ``XGBClassifier``.
    """
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    """Train an XGBoost classifier on Iris and plot its normalized confusion matrix.

    Renamed from the obfuscated ``snake_case`` to ``main``, the name the
    ``__main__`` guard calls; the mangled body bound every result to one
    repeated local and read undefined placeholders — reconstructed so each
    value flows to where it is used.
    """
    data = load_iris()
    features, targets = data_handling(data)
    # hold out 25% of the samples for evaluation
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = data["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap='Blues',
        normalize='true',
    )
    plt.title('Normalized Confusion Matrix - IRIS Dataset')
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
def jaro_winkler(stra: str, strb: str) -> float:
    """Return the Jaro-Winkler similarity of two strings, in [0.0, 1.0].

    Jaro similarity counts characters matching within a sliding window of
    half the shorter string's length and discounts transpositions; Winkler
    adds a bonus of 0.1 per shared prefix character (up to 4). Renamed from
    the obfuscated ``SCREAMING_SNAKE_CASE_`` to ``jaro_winkler``, the name
    the ``__main__`` guard calls; the duplicate parameter names (a
    SyntaxError) and the undefined locals of the mangled original are fixed.

    >>> round(jaro_winkler("martha", "marhta"), 10)
    0.9611111111
    """

    def get_matched_characters(_stra: str, _strb: str) -> str:
        """Characters of ``_stra`` that match within the Jaro window of ``_strb``."""
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, char in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if char in _strb[left:right]:
                matched.append(char)
                # blank out the matched character so it cannot match twice
                _strb = f"{_strb[0:_strb.index(char)]} {_strb[_strb.index(char) + 1:]}"
        return "".join(matched)

    # matching characters (from each side, to detect transpositions)
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transposition: half the number of matched characters in a different order
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
| 666 | 0 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
__magic_name__ = datasets.utils.logging.get_logger(__name__)
class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilderConfig ):
    """Builder config for AudioFolder.

    NOTE(review): both fields were obfuscated to the single name ``a_``, so
    the second overrides the first — presumably ``drop_labels`` and
    ``drop_metadata``; confirm against the folder-based builder config.
    """
    a_ = None
    a_ = None
class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilder ):
    """Folder-based dataset builder for audio classification.

    NOTE(review): every class attribute was obfuscated to ``a_`` so each
    assignment overrides the previous one, the reference to
    ``AudioFolderConfig`` no longer resolves (that class was renamed above),
    and this class shadows the config class's obfuscated name. Intended
    attributes (from the folder-based builder contract): BASE_FEATURE,
    BASE_COLUMN_NAME, BUILDER_CONFIG_CLASS, EXTENSIONS, CLASSIFICATION_TASK.
    """
    a_ = datasets.Audio()
    a_ = "audio"
    a_ = AudioFolderConfig
    a_ = 42 # definition at the bottom of the script
    a_ = AudioClassification(audio_column="""audio""" , label_column="""label""" )
# File extensions recognized as audio by the AudioFolder builder.
# NOTE(review): the list is bound to the obfuscated name ``__magic_name__``,
# yet the final line reads ``AUDIO_EXTENSIONS`` (the list's presumable
# original name) — a NameError as written; confirm and restore the name.
__magic_name__ = [
    '.aiff',
    '.au',
    '.avr',
    '.caf',
    '.flac',
    '.htk',
    '.svx',
    '.mat4',
    '.mat5',
    '.mpc2k',
    '.ogg',
    '.paf',
    '.pvf',
    '.raw',
    '.rf64',
    '.sd2',
    '.sds',
    '.ircam',
    '.voc',
    '.w64',
    '.wav',
    '.nist',
    '.wavex',
    '.wve',
    '.xi',
    '.mp3',
    '.opus',
]
__magic_name__ = AUDIO_EXTENSIONS
| 665 | import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
# Module-level setup for the Bark checkpoint conversion script. The mangled
# original bound every constant to the same name ``UpperCAmelCase``, while the
# functions below read ``logger``, ``new_layer_name_dict``,
# ``REMOTE_MODEL_PATHS``, ``default_cache_dir`` and ``CACHE_DIR`` — those
# names are restored here so the references resolve.
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fixed seed so the converted / original model comparison is deterministic
set_seed(7_7_0)

# Mapping from suno/bark checkpoint layer-name fragments to their HF Bark equivalents.
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# Remote checkpoint locations on the Hub, keyed by "<model_type>[_small]".
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
# local download/cache directory for the original suno checkpoints
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path for a Bark checkpoint.

    Renamed from the obfuscated ``SCREAMING_SNAKE_CASE_`` to
    ``_get_ckpt_path``, the name the conversion entry point calls; the
    duplicate ``lowerCAmelCase_`` parameter names (a SyntaxError) are
    replaced with the keyword names used at the call site (``use_small=``).

    Args:
        model_type: one of "text", "coarse", "fine".
        use_small: select the "_small" checkpoint variant.

    Returns:
        Path under ``CACHE_DIR`` where the checkpoint file is stored (the
        same layout ``_load_model`` downloads into).
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    """Download one Bark checkpoint file from the Hugging Face Hub into CACHE_DIR.

    Renamed from the obfuscated ``SCREAMING_SNAKE_CASE_`` to ``_download``,
    the name ``_load_model`` calls with ``(repo_id, file_name)``; the
    duplicate parameter names (a SyntaxError) are replaced.

    Args:
        from_hf_path: Hub repository id (e.g. "suno/bark").
        file_name: checkpoint file name inside the repository.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load a suno/bark checkpoint into the matching HF Bark sub-model.

    Renamed from the obfuscated ``SCREAMING_SNAKE_CASE_`` to ``_load_model``,
    the name the conversion entry point calls (with ``model_type=`` and
    ``use_small=`` keywords, which pins the parameter names); the duplicate
    parameter names (a SyntaxError) and the collapsed ``snake_case_`` locals
    are restored so every value flows to where it is used.

    Args:
        ckpt_path: local checkpoint path (downloaded on demand if missing).
        device: torch device for ``map_location`` and the final ``model.to``.
        use_small: select the "_small" checkpoint variant.
        model_type: "text", "coarse" or "fine"; chooses model/config classes.

    Returns:
        The HF Bark sub-model in eval mode with the converted weights loaded.

    Raises:
        NotImplementedError: for an unknown ``model_type``.
        ValueError: if the converted state dict has unexpected extra or
            missing keys (ignoring the ``.attn.bias`` buffers).
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack: normalize legacy checkpoints that only store "vocab_size"
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint: strip the torch.compile prefix and remap layer names
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    # strict=False because the ignored ".attn.bias" buffers are absent on purpose
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss, 3 )} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def SCREAMING_SNAKE_CASE_(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one Bark sub-model checkpoint to HF format and sanity-check it.

    Loads both the HF-converted model (via ``_load_model``) and the original
    suno checkpoint (via ``_bark_load_model``), verifies parameter counts and
    output agreement on a random batch, then saves the HF model.

    Args:
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        use_small: convert the "small" checkpoint variant.
        model_type: one of ``"text"``, ``"coarse"``, ``"fine"``.

    Raises:
        NotImplementedError: for an unknown ``model_type``.
        ValueError: if parameter counts or outputs disagree.

    NOTE(review): the obfuscated original reused a single name for every
    parameter (duplicate argument names are a SyntaxError) and for every
    local; names were restored from the call sites — confirm against the
    upstream Bark conversion script.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(2_5_6, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(2_5_6, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def SCREAMING_SNAKE_CASE_(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    """Assemble a full ``BarkModel`` from three converted sub-model checkpoints.

    Builds the composite config and generation config from the semantic,
    coarse and fine sub-models plus the pretrained Encodec codec, attaches the
    loaded sub-models to a fresh ``BarkModel``, and saves/pushes the result.

    NOTE(review): the obfuscated original reused one name for every parameter
    (a SyntaxError) and for every local; names restored from the references
    the body itself makes (``semantic.generation_config`` etc.) — confirm
    against the upstream Bark conversion script.
    """
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    # sub-model configs; the codec is the pretrained 24 kHz Encodec model
    semantic_config = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarse_config = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fine_config = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codec_config = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semantic_config, coarse_config, fine_config, codec_config)
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)
    bark = BarkModel(bark_config)
    # attach the already-loaded sub-models to the composite model
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    # NOTE(review): the push_to_hub flag was obfuscated; the upstream script
    # pushes by default — confirm before running against a real hub repo.
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    # Fix: the obfuscated original bound the parser and the parsed args to a
    # dead name (`UpperCAmelCase`) while the lines below read `parser`/`args`,
    # which raised NameError at runtime.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    args = parser.parse_args()
    # NOTE(review): `load_model` must be the conversion entry point defined
    # above (its def was obfuscated to SCREAMING_SNAKE_CASE_) — confirm.
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 666 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and canonical checkpoint map.
# Fix: both objects were bound to the same obfuscated name (`__A`), so the
# dict silently replaced the logger; the config class below calls
# `logger.info`, which would have raised NameError.
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class _snake_case ( _UpperCamelCase ):
    """Configuration for a Transformer-XL style model.

    Stores the hyper-parameters of the adaptive-embedding Transformer-XL
    architecture. Three fatal defects in the obfuscated original are fixed
    here: (1) every ``__init__`` parameter was named ``_lowerCamelCase``
    (duplicate argument names are a SyntaxError); (2) the hyper-parameters
    were bound to a throwaway local instead of ``self`` attributes, breaking
    ``self.cutoffs.extend(...)``; (3) the property getter/setter pair was not
    named ``max_position_embeddings``, so ``@max_position_embeddings.setter``
    raised NameError at class-creation time.
    """

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    # map common config attribute names onto this model's native names
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=26_7735,
        cutoffs=[2_0000, 4_0000, 20_0000],  # mutable default is safe: copied into a fresh list below
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # NOTE(review): attribute name `tie_projs` restored from the upstream
        # transfo-xl config — confirm against the modeling code.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Transformer-XL has no hard sequence-length limit; -1 signals "unbounded".
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the UperNet model package.
# Fixes: (1) the import structure and the torch-only symbol list were bound to
# a throwaway name (`UpperCAmelCase`) while `_LazyModule` read
# `_import_structure` (NameError); (2) the constructed lazy module was
# discarded instead of replacing this module in `sys.modules`.
# NOTE(review): requires `from typing import TYPE_CHECKING` at the top of the
# file — the original import line was mangled by the concatenation.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: expose the modeling classes as well
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    # replace this module with a proxy that imports submodules on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 0 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( _UpperCamelCase , unittest.TestCase ):
    """Fast pipeline tests for ``KandinskyVaaPriorPipeline``.

    NOTE(review): machine-obfuscated source. All class attributes share the
    name ``a__`` and all methods the name ``__A``, so later bindings overwrite
    earlier ones at class-creation time, and several bodies read names
    (``A__``, ``tokenizer``, ``prior``, ...) that are never assigned. Code is
    left byte-identical; only comments/docstrings were added.
    """

    # pipeline class under test (overwritten by the later ``a__`` bindings)
    a__ : Optional[int] = KandinskyVaaPriorPipeline
    # required call params / batch params / optional params the common tester passes
    a__ : Optional[int] = ["prompt"]
    a__ : Union[str, Any] = ["prompt", "negative_prompt"]
    a__ : Dict = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    a__ : Tuple = False

    @property
    def __A ( self : List[str] ) -> Union[str, Any]:
        # hidden size shared by the dummy text/vision sub-models
        return 32

    @property
    def __A ( self : Union[str, Any] ) -> str:
        return 32

    @property
    def __A ( self : Optional[int] ) -> List[str]:
        return self.time_input_dim

    @property
    def __A ( self : List[Any] ) -> Any:
        return self.time_input_dim * 4

    @property
    def __A ( self : str ) -> Any:
        return 1_00

    @property
    def __A ( self : Optional[Any] ) -> Union[str, Any]:
        # tiny CLIP tokenizer fixture
        # NOTE(review): returns `tokenizer`, but the assignment went to the
        # obfuscated local — unbound at runtime as written.
        __lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer

    @property
    def __A ( self : List[Any] ) -> Union[str, Any]:
        torch.manual_seed(0 )
        __lowerCamelCase = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModelWithProjection(A__ )

    @property
    def __A ( self : List[Any] ) -> Tuple:
        torch.manual_seed(0 )
        __lowerCamelCase = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        __lowerCamelCase = PriorTransformer(**A__ )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        __lowerCamelCase = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model

    @property
    def __A ( self : Union[str, Any] ) -> Dict:
        torch.manual_seed(0 )
        __lowerCamelCase = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=2_24 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        __lowerCamelCase = CLIPVisionModelWithProjection(A__ )
        return model

    @property
    def __A ( self : Any ) -> Dict:
        __lowerCamelCase = CLIPImageProcessor(
            crop_size=2_24 , do_center_crop=A__ , do_normalize=A__ , do_resize=A__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
        return image_processor

    def __A ( self : List[Any] ) -> List[str]:
        # gather all dummy sub-models into the kwargs dict expected by the pipeline
        __lowerCamelCase = self.dummy_prior
        __lowerCamelCase = self.dummy_image_encoder
        __lowerCamelCase = self.dummy_text_encoder
        __lowerCamelCase = self.dummy_tokenizer
        __lowerCamelCase = self.dummy_image_processor
        __lowerCamelCase = UnCLIPScheduler(
            variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=A__ , clip_sample_range=10.0 , )
        __lowerCamelCase = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=0 ) -> List[Any]:
        # deterministic generator + minimal call kwargs for a 2-step run
        # NOTE(review): duplicate parameter names above are a SyntaxError as written.
        if str(A__ ).startswith('''mps''' ):
            __lowerCamelCase = torch.manual_seed(A__ )
        else:
            __lowerCamelCase = torch.Generator(device=A__ ).manual_seed(A__ )
        __lowerCamelCase = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def __A ( self : List[Any] ) -> Tuple:
        # end-to-end smoke test on CPU: image_embeds slice vs frozen reference
        __lowerCamelCase = "cpu"
        __lowerCamelCase = self.get_dummy_components()
        __lowerCamelCase = self.pipeline_class(**A__ )
        __lowerCamelCase = pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        __lowerCamelCase = pipe(**self.get_dummy_inputs(A__ ) )
        __lowerCamelCase = output.image_embeds
        __lowerCamelCase = pipe(
            **self.get_dummy_inputs(A__ ) , return_dict=A__ , )[0]
        __lowerCamelCase = image[0, -10:]
        __lowerCamelCase = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        __lowerCamelCase = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def __A ( self : Any ) -> List[Any]:
        # batched vs single inference should be numerically identical on CPU
        __lowerCamelCase = torch_device == "cpu"
        __lowerCamelCase = True
        __lowerCamelCase = False
        self._test_inference_batch_single_identical(
            test_max_difference=A__ , relax_max_difference=A__ , test_mean_pixel_difference=A__ , )

    @skip_mps
    def __A ( self : List[str] ) -> Optional[int]:
        # attention slicing must not change the output
        __lowerCamelCase = torch_device == "cpu"
        __lowerCamelCase = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=A__ , test_mean_pixel_difference=A__ , )
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase = logging.get_logger(__name__)
class snake_case__ ( _UpperCamelCase ):
    """Image processor implementing resize → center-crop → rescale → normalize,
    plus semantic-segmentation post-processing of model logits.

    NOTE(review): machine-obfuscated source — every parameter of each method
    is named ``A__`` (duplicate argument names are a SyntaxError as written)
    and locals are bound to ``snake_case_`` yet later read under their real
    names. Code left byte-identical; only comments/docstrings changed.
    """

    # model input produced by `preprocess`
    _SCREAMING_SNAKE_CASE : str = ["pixel_values"]

    def __init__( self : List[Any] , A__ : bool = True , A__ : Optional[Dict[str, int]] = None , A__ : PILImageResampling = PILImageResampling.BILINEAR , A__ : bool = True , A__ : Dict[str, int] = None , A__ : bool = True , A__ : Union[int, float] = 1 / 2_55 , A__ : bool = True , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , **A__ : int , ) -> None:
        '''Store the default preprocessing configuration.'''
        super().__init__(**A__ )
        # defaults: shortest edge 256 for resize, 224x224 center crop,
        # 1/255 rescale, ImageNet mean/std normalization
        snake_case_ : Optional[int] = size if size is not None else {"shortest_edge": 2_56}
        snake_case_ : Dict = get_size_dict(A__ , default_to_square=A__ )
        snake_case_ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        snake_case_ : Any = get_size_dict(A__ , param_name="crop_size" )
        snake_case_ : int = do_resize
        snake_case_ : Optional[Any] = size
        snake_case_ : Optional[Any] = resample
        snake_case_ : Optional[int] = do_center_crop
        snake_case_ : List[Any] = crop_size
        snake_case_ : List[Any] = do_rescale
        snake_case_ : Optional[int] = rescale_factor
        snake_case_ : Optional[Any] = do_normalize
        snake_case_ : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        snake_case_ : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def UpperCAmelCase__ ( self : List[str] , A__ : np.ndarray , A__ : Dict[str, int] , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : str , ) -> np.ndarray:
        '''Resize so the image's shortest edge matches ``size["shortest_edge"]``.'''
        snake_case_ : Optional[Any] = get_size_dict(A__ , default_to_square=A__ )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        snake_case_ : Any = get_resize_output_image_size(A__ , size=size["shortest_edge"] , default_to_square=A__ )
        return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ )

    def UpperCAmelCase__ ( self : int , A__ : np.ndarray , A__ : Dict[str, int] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Union[str, Any] , ) -> np.ndarray:
        '''Center-crop to ``(size["height"], size["width"])``.'''
        snake_case_ : Tuple = get_size_dict(A__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(A__ , size=(size["height"], size["width"]) , data_format=A__ , **A__ )

    def UpperCAmelCase__ ( self : List[str] , A__ : np.ndarray , A__ : float , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Tuple ) -> np.ndarray:
        '''Multiply pixel values by ``scale`` (typically 1/255).'''
        return rescale(A__ , scale=A__ , data_format=A__ , **A__ )

    def UpperCAmelCase__ ( self : Tuple , A__ : np.ndarray , A__ : Union[float, List[float]] , A__ : Union[float, List[float]] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Dict , ) -> np.ndarray:
        '''Channel-wise normalize with ``mean``/``std``.'''
        return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ )

    def UpperCAmelCase__ ( self : Union[str, Any] , A__ : ImageInput , A__ : Optional[bool] = None , A__ : Dict[str, int] = None , A__ : PILImageResampling = None , A__ : bool = None , A__ : Dict[str, int] = None , A__ : Optional[bool] = None , A__ : Optional[float] = None , A__ : Optional[bool] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[str, TensorType]] = None , A__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A__ : Union[str, Any] , ) -> Optional[int]:
        '''Run the configured preprocessing pipeline over a batch of images.'''
        # fall back to the instance-level defaults for any unset option
        snake_case_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
        snake_case_ : Dict = size if size is not None else self.size
        snake_case_ : Optional[Any] = get_size_dict(A__ , default_to_square=A__ )
        snake_case_ : Tuple = resample if resample is not None else self.resample
        snake_case_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case_ : str = crop_size if crop_size is not None else self.crop_size
        snake_case_ : Tuple = get_size_dict(A__ , param_name="crop_size" )
        snake_case_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
        snake_case_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case_ : Any = do_normalize if do_normalize is not None else self.do_normalize
        snake_case_ : Any = image_mean if image_mean is not None else self.image_mean
        snake_case_ : List[str] = image_std if image_std is not None else self.image_std
        snake_case_ : Dict = make_list_of_images(A__ )
        if not valid_images(A__ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # validate that each enabled step has the parameters it needs
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        snake_case_ : Tuple = [to_numpy_array(A__ ) for image in images]
        if do_resize:
            snake_case_ : Any = [self.resize(image=A__ , size=A__ , resample=A__ ) for image in images]
        if do_center_crop:
            snake_case_ : List[str] = [self.center_crop(image=A__ , size=A__ ) for image in images]
        if do_rescale:
            snake_case_ : Any = [self.rescale(image=A__ , scale=A__ ) for image in images]
        if do_normalize:
            snake_case_ : Union[str, Any] = [self.normalize(image=A__ , mean=A__ , std=A__ ) for image in images]
        snake_case_ : Optional[Any] = [to_channel_dimension_format(A__ , A__ ) for image in images]
        snake_case_ : Any = {"pixel_values": images}
        return BatchFeature(data=A__ , tensor_type=A__ )

    def UpperCAmelCase__ ( self : List[str] , A__ : Dict , A__ : List[Tuple] = None ) -> Union[str, Any]:
        '''Convert model logits into per-image semantic segmentation maps.'''
        snake_case_ : Tuple = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(A__ ) != len(A__ ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(A__ ):
                snake_case_ : Dict = target_sizes.numpy()
            snake_case_ : int = []
            # upsample each image's logits to its target size before argmax
            for idx in range(len(A__ ) ):
                snake_case_ : List[str] = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=A__ )
                snake_case_ : int = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(A__ )
        else:
            # no resizing requested: argmax over the class dimension directly
            snake_case_ : List[Any] = logits.argmax(dim=1 )
            snake_case_ : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 666 | 0 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
# make the repo's test utilities importable before loading the custom modules below
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402

# the fast variant needs the `tokenizers` backend
if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Tuple = 0
@slow
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__lowercase : Dict = AutoTokenizer.from_pretrained(A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(A__ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__lowercase : Optional[Any] = AutoTokenizer.from_pretrained(A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(A__ ) , 0 )
def lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = AutoTokenizer.from_pretrained(A__ )
self.assertIsInstance(A__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = AutoTokenizer.from_pretrained(A__ )
self.assertIsInstance(A__ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = AutoConfig.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
# Check that tokenizer_type ≠ model_type
__lowercase : str = AutoTokenizer.from_pretrained(A__ , config=A__ )
self.assertIsInstance(A__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(A__ , """vocab.txt""" ) )
__lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(A__ , tokenizer_type="""bert""" , use_fast=A__ )
self.assertIsInstance(A__ , A__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(A__ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(A__ , """merges.txt""" ) )
__lowercase : Any = AutoTokenizer.from_pretrained(A__ , tokenizer_type="""gpt2""" , use_fast=A__ )
self.assertIsInstance(A__ , A__ )
@require_tokenizers
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(A__ , """vocab.txt""" ) )
__lowercase : Any = AutoTokenizer.from_pretrained(A__ , tokenizer_type="""bert""" )
self.assertIsInstance(A__ , A__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(A__ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(A__ , """merges.txt""" ) )
__lowercase : Optional[int] = AutoTokenizer.from_pretrained(A__ , tokenizer_type="""gpt2""" )
self.assertIsInstance(A__ , A__ )
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
with pytest.raises(A__ ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__lowercase : Optional[Any] = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(A__ , (BertTokenizer, BertTokenizerFast) )
if isinstance(A__ , A__ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , A__ )
else:
self.assertEqual(tokenizer.do_lower_case , A__ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
A__ , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
__lowercase : str = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase : int = TOKENIZER_MAPPING.values()
__lowercase : Any = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(A__ )
@require_tokenizers
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=A__ ) , A__ )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , A__ )
@require_tokenizers
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=A__ )
__lowercase : List[str] = "Hello, world. How are you?"
__lowercase : Optional[int] = tokenizer.tokenize(A__ )
self.assertEqual("""[UNK]""" , tokens[0] )
__lowercase : List[str] = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=A__ )
__lowercase : str = tokenizer.tokenize(A__ )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Any = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(A__ ) , A__ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(A__ )
self.assertIsInstance(A__ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A__ )
__lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(A__ )
self.assertIsInstance(A__ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase : int = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(A__ , A__ )
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
__lowercase : Any = get_tokenizer_config("""bert-base-cased""" )
__lowercase : Dict = config.pop("""_commit_hash""" , A__ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(A__ , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__lowercase : Optional[int] = get_tokenizer_config(A__ )
self.assertDictEqual(A__ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__lowercase : Any = AutoTokenizer.from_pretrained(A__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A__ )
__lowercase : Tuple = get_tokenizer_config(A__ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , A__ )
AutoTokenizer.register(A__ , slow_tokenizer_class=A__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A__ ):
AutoTokenizer.register(A__ , slow_tokenizer_class=A__ )
__lowercase : Union[str, Any] = CustomTokenizer.from_pretrained(A__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A__ )
__lowercase : Tuple = AutoTokenizer.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , A__ )
# Can register in two steps
AutoTokenizer.register(A__ , slow_tokenizer_class=A__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(A__ , fast_tokenizer_class=A__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
A__ , slow_tokenizer_class=A__ , fast_tokenizer_class=A__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A__ ):
AutoTokenizer.register(A__ , fast_tokenizer_class=A__ )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase : int = BertTokenizerFast.from_pretrained(A__ )
bert_tokenizer.save_pretrained(A__ )
__lowercase : Dict = CustomTokenizerFast.from_pretrained(A__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A__ )
__lowercase : Tuple = AutoTokenizer.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
__lowercase : str = AutoTokenizer.from_pretrained(A__ , use_fast=A__ )
self.assertIsInstance(A__ , A__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def test_from_pretrained_dynamic_tokenizer(self):
    """Loading a tokenizer whose code lives on the Hub requires trust_remote_code=True,
    and the loaded (and reloaded) tokenizer must carry the remote class's marker.
    """
    # Without the flag (or with it disabled) loading must fail.
    with self.assertRaises(ValueError):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
    # If remote code is disabled, we can't load this config.
    with self.assertRaises(ValueError):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
        )
    tokenizer = AutoTokenizer.from_pretrained(
        "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
    )
    self.assertTrue(tokenizer.special_attribute_present)
    # Test tokenizer can be reloaded.
    with tempfile.TemporaryDirectory() as tmp_dir:
        tokenizer.save_pretrained(tmp_dir)
        reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
    self.assertTrue(reloaded_tokenizer.special_attribute_present)
    if is_tokenizers_available():
        self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
        self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")
        # Test we can also load the slow version
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
        )
        self.assertTrue(tokenizer.special_attribute_present)
        self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
        self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
        self.assertTrue(reloaded_tokenizer.special_attribute_present)
    else:
        self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
@require_tokenizers
def test_from_pretrained_dynamic_tokenizer_conflict(self):
    """A locally registered tokenizer must win over Hub code unless
    trust_remote_code=True is passed explicitly (marker attribute flips True).
    """

    # Local stand-ins for the remote "new" tokenizer. Names are grounded by the
    # assertEqual(..., "NewTokenizer(Fast)") checks below; the base classes are
    # assumed to be the BERT tokenizers as upstream -- TODO(review): confirm.
    class NewTokenizer(BertTokenizer):
        special_attribute_present = False

    class NewTokenizerFast(BertTokenizerFast):
        slow_tokenizer_class = NewTokenizer
        special_attribute_present = False

    try:
        AutoConfig.register("custom", CustomConfig)
        AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
        AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
        # If remote code is not set, the default is to use local
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
        self.assertFalse(tokenizer.special_attribute_present)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
        self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        self.assertFalse(tokenizer.special_attribute_present)
        # If remote code is disabled, we load the local one.
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
        )
        self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
        self.assertFalse(tokenizer.special_attribute_present)
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
        )
        self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        self.assertFalse(tokenizer.special_attribute_present)
        # If remote is enabled, we load from the Hub
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
        )
        self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
        self.assertTrue(tokenizer.special_attribute_present)
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
        )
        self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        self.assertTrue(tokenizer.special_attribute_present)
    finally:
        # Undo global registrations regardless of assertion outcomes.
        if "custom" in CONFIG_MAPPING._extra_content:
            del CONFIG_MAPPING._extra_content["custom"]
        if CustomConfig in TOKENIZER_MAPPING._extra_content:
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
    """Load a Hub tokenizer stored in the legacy dynamic-code format."""
    tokenizer = AutoTokenizer.from_pretrained(
        "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
    )
    self.assertTrue(tokenizer.special_attribute_present)
    if is_tokenizers_available():
        self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
        # Test we can also load the slow version
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
        )
        self.assertTrue(tokenizer.special_attribute_present)
        self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    else:
        self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
def test_repo_not_found(self):
    """An unknown repo id must raise with a helpful message."""
    # NOTE(review): exception type was an undefined placeholder; EnvironmentError
    # matches the message asserted here -- confirm against from_pretrained docs.
    with self.assertRaisesRegex(
        EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
    ):
        _ = AutoTokenizer.from_pretrained("bert-base")
def test_revision_not_found(self):
    """An unknown git revision must raise with a helpful message."""
    with self.assertRaisesRegex(
        EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
    ):
        # NOTE(review): the repo id was an undefined placeholder; upstream uses
        # DUMMY_UNKNOWN_IDENTIFIER -- confirm against the file's module constants.
        _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
def test_cached_tokenizer_has_minimum_calls_to_head(self):
    """Reloading an already-cached tokenizer should issue exactly one HEAD request
    and no other network traffic.

    Also removes dataset-residue text that had been fused onto the final line.
    """
    # First call populates the local cache.
    _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
    with RequestCounter() as counter:
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
    self.assertEqual(counter.get_request_count, 0)
    self.assertEqual(counter.head_request_count, 1)
    self.assertEqual(counter.other_request_count, 0)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import structure: submodule name -> public names it provides. The original
# text assigned every optional list to one throwaway name and then passed an
# undefined `_import_structure` to _LazyModule; this restores the real structure.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module; the original result was discarded, which made the
    # `import sys` above dead code and the whole structure inert.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 0 |
from __future__ import annotations
def longest_subsequence(a__: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of *a__* (brute force).

    The original def was mangled to ``__SCREAMING_SNAKE_CASE`` while the body
    recursed on ``longest_subsequence`` and read an undefined ``lowerCAmelCase_``;
    the recursion grounds the real name restored here.
    """
    array_length = len(a__)
    # A list of zero or one elements is trivially its own longest subsequence.
    if array_length <= 1:
        return a__

    pivot = a__[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    # Try the first element that breaks the run as an alternative starting pivot.
    while not is_found and i < array_length:
        if a__[i] < pivot:
            is_found = True
            temp_array = [element for element in a__[i:] if element >= a__[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    # Best subsequence that keeps the current pivot as its first element.
    temp_array = [element for element in a__[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 17 | from ...configuration_utils import PretrainedConfig
# Map of fine-tuned TAPAS checkpoint ids to their hosted config.json URLs.
# NOTE(review): this constant is not referenced in this snippet; it presumably
# serves as the pretrained-config archive map for the class below -- confirm.
UpperCAmelCase = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}
class snake_case__(_UpperCamelCase):
    """Configuration for a TAPAS-style model: BERT backbone hyper-parameters plus
    the fine-tuning (cell-selection / aggregation) hyper-parameters.

    The original ``__init__`` declared every parameter as ``A__`` (a duplicate-
    argument SyntaxError); the real names below are grounded by the body's own
    right-hand-side references.
    """

    # NOTE(review): attribute name kept as-is for interface stability; it plays
    # the role of the config's model-type tag.
    _SCREAMING_SNAKE_CASE: Dict = "tapas"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels, dict):
            # Normalize JSON-style string keys to ints (was int(A__) -- a bug).
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 666 | 0 |
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase(_UpperCamelCase):
    """Tests for KDPMaDiscreteScheduler.

    The original class bound both class attributes and all five test methods to
    single duplicated names (each definition shadowed the previous one) and
    assigned every intermediate to ``a`` while reading real names like
    ``scheduler`` and ``sample``; names below are reconstructed from those reads.
    """

    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler kwargs; individual tests override via **kwargs."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 633 | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Minimal Beam builder with a single string column "content".

    Renamed from a duplicated placeholder: the test class below instantiates
    ``DummyBeamDataset``. Hook names follow the BeamBasedBuilder API.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam builder with a nested feature {"a": Sequence({"b": string})}.

    Renamed from a duplicated placeholder: the test class below instantiates
    ``NestedBeamDataset``. Hook names follow the BeamBasedBuilder API.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Three (id, {"content": str}) rows for the flat Beam builder tests.

    Renamed from a duplicated placeholder: call sites throughout this module
    reference ``get_test_dummy_examples``.
    """
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    """Three (id, nested dict) rows for the nested-feature Beam builder tests.

    Renamed from a duplicated placeholder: call sites throughout this module
    reference ``get_test_nested_examples``.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class snake_case__(_UpperCamelCase):
    """End-to-end tests for Beam-based builders on the DirectRunner.

    The original bound every intermediate to ``snake_case_`` while reading
    ``builder``/``dset``, duplicated all method names, and checked the SAME
    shard filename (00000-of-00002) twice; the second check now targets 00001.
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two output shards while delegating to the real writer.
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 666 | 0 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
# Module-level logger; not referenced within this snippet.
lowerCAmelCase__ : Dict =logging.get_logger(__name__)
class __lowercase(_UpperCamelCase):
    """Deprecated alias of DonutImageProcessor, kept for backward compatibility.

    The original ``__init__`` declared ``*lowerCAmelCase__, **lowerCAmelCase__``
    (duplicate argument -- SyntaxError) and warned with an undefined ``A__``.
    """

    def __init__(self, *args, **kwargs):
        # FutureWarning per the usual deprecation-shim convention -- the original
        # category placeholder was undefined. Message kept byte-for-byte.
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 101 | import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the dedup set so every test observes the deprecation warning.

    The original fixture's parameter was named ``lowerCAmelCase_`` while the body
    read ``monkeypatch`` -- pytest injects fixtures by parameter name.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())
@pytest.fixture
def mock_hfh(monkeypatch):
    """Replace the huggingface_hub module used by ``datasets.inspect`` with a stub
    exposing a fixed metric list (names grounded by the original's
    ``MetricMock``/``HfhMock``/``self._metrics`` references).
    """

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        # NOTE(review): method name assumed to mirror huggingface_hub.list_metrics -- confirm.
        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())
@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Every metric entry point must emit the deprecation warning pointing to evaluate.

    Parameter names restored: the body reads ``args``/``tmp_path``/``func`` and the
    two fixtures above must be requested by name.
    """
    if "tmp_path" in args:
        # Substitute the real tmp_path fixture for its string placeholder.
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 666 | 0 |
"""Lazy import structure for the Conditional DETR model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Lazy-import structure: submodule name -> public names it provides. The original
# assigned every optional list to one throwaway name and passed an undefined
# `_import_structure` to _LazyModule; this restores the real structure.
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module; the original discarded the result, leaving
    # `import sys` dead and the structure inert.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 208 | from __future__ import annotations
import bisect
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: list[int] , lowerCAmelCase_: int , lowerCAmelCase_: int = 0 , lowerCAmelCase_: int = -1 ):
if hi < 0:
snake_case_ : Any = len(lowerCAmelCase_ )
while lo < hi:
snake_case_ : List[Any] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
snake_case_ : Tuple = mid + 1
else:
snake_case_ : Dict = mid
return lo
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: list[int] , lowerCAmelCase_: int , lowerCAmelCase_: int = 0 , lowerCAmelCase_: int = -1 ):
if hi < 0:
snake_case_ : Optional[Any] = len(lowerCAmelCase_ )
while lo < hi:
snake_case_ : Union[str, Any] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
snake_case_ : Optional[Any] = mid + 1
else:
snake_case_ : Tuple = mid
return lo
def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert *item* at the leftmost position that keeps the list sorted.

    The original body already called ``bisect_left`` by name; the def name here
    follows the standard insort_left convention for a left-biased insert.
    """
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert *item* at the rightmost position that keeps the list sorted.

    The original body already called ``bisect_right`` by name; the def name here
    follows the standard insort_right convention for a right-biased insert.
    """
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Return the index of *item* in *sorted_collection*, or None if absent.

    Name restored: the ``__main__`` block below calls ``binary_search``; local
    names (``left``/``right``/``midpoint``) were read but assigned to a
    throwaway placeholder in the original.
    """
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Standard-library variant of binary_search, built on bisect.bisect_left."""
    index = bisect.bisect_left(sorted_collection, item)
    # bisect_left returns an insertion point; verify the item is actually there.
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over sorted_collection[left:right+1]; None if absent.

    Name fully grounded: the original body already recursed on
    ``binary_search_by_recursion`` while the def itself was a duplicated placeholder.
    """
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    # Interactive demo: the original assigned every value to one throwaway name
    # while the f-strings and binary_search call read the names restored below.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 666 | 0 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Polynomial warmup wrapped around a decay schedule.

    Renamed from a duplicated placeholder: ``create_optimizer`` below calls
    ``WarmUp(...)`` and serializes it via ``{"WarmUp": WarmUp}``. ``tf.floataa``
    (a mangled ``tf.float32``) is restored, and the config hook is named
    ``get_config`` as required for Keras schedule serialization.
    """

    # NOTE(review): positional order (initial_learning_rate, decay_schedule_fn,
    # warmup_steps) assumed from the keyword-only call below -- confirm.
    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def a(
    init_lr,
    num_train_steps,
    num_warmup_steps,
    min_lr_ratio=0.0,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    adam_clipnorm=None,
    adam_global_clipnorm=None,
    weight_decay_rate=0.0,
    power=1.0,
    include_in_weight_decay=None,
):
    """Build an (optimizer, lr_schedule) pair with polynomial decay and optional warmup.

    Parameter names restored from the body's own reads (``init_lr``,
    ``num_train_steps``, ...); the original duplicated one parameter name
    (SyntaxError) and passed ``beta_a=`` twice where Keras expects
    ``beta_1``/``beta_2``.
    """
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class _lowerCAmelCase ( __UpperCAmelCase ):
    """Adam with decoupled (AdamW-style) weight decay, built on the Keras Adam base.

    Weight decay is applied directly to the variables (scaled by the current
    learning rate) rather than folded into the gradients, matching
    "Decoupled Weight Decay Regularization" (Loshchilov & Hutter).

    NOTE(review): the base class is obfuscated here; it must be
    ``tf.keras.optimizers.Adam`` (or legacy equivalent) for the ``super()``
    calls below (``_prepare_local``, ``_resource_apply_*``) to exist — confirm.

    The original block was unrunnable: every ``__init__``/helper signature
    repeated the parameter name ``lowercase`` (SyntaxError) and every method
    was named ``_a`` while the bodies call ``self._get_lr``,
    ``self._decay_weights_op`` etc. by their real names. Real names and
    distinct parameters are restored below.
    """

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1E-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        # Positional order must match the Adam base-class constructor.
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        # Regex lists controlling which variables receive weight decay;
        # include takes precedence over exclude (see _do_use_weight_decay).
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Recreate the optimizer from its config, resolving the WarmUp schedule."""
        custom_objects = {"WarmUp": WarmUp}
        return super(_lowerCAmelCase, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(_lowerCAmelCase, self)._prepare_local(var_device, var_dtype, apply_state)
        # Cache the decay rate as a constant in the per-(device, dtype) state
        # (the original assigned it to a throwaway local instead).
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="""adam_weight_decay_rate""" )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        """Apply decoupled weight decay to `var`; no-op for excluded variables."""
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(_lowerCAmelCase, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieve the current learning-rate tensor for the given device/dtype."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        # Ensure the decay update happens before the Adam step on this variable.
        with tf.control_dependencies([decay]):
            return super(_lowerCAmelCase, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(_lowerCAmelCase, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"""weight_decay_rate""": self.weight_decay_rate} )
        return config

    def _do_use_weight_decay(self, param_name):
        """Return True if `param_name` should receive weight decay.

        Include patterns win over exclude patterns; a zero decay rate
        disables decay entirely.
        """
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class _lowerCAmelCase ( __UpperCAmelCase ):
    """Accumulates gradients across steps for large effective batch sizes.

    Call the instance with a list of per-variable gradients (``None`` entries
    allowed) to add them into internal accumulator variables; ``reset()``
    zeroes the accumulators and the step counter.

    Fixes to the obfuscated original: the ``step``/``gradients`` properties
    and ``reset`` were all named ``_a`` although ``__call__`` reads
    ``self.step``; ``tf.zeros_like`` was applied to the whole gradient list
    instead of each gradient; ``reset`` referenced an undefined name;
    ``trainable`` was not ``False``; the step dtype ``tf.intaa`` is restored
    to ``tf.int64``.
    """

    def __init__(self):
        # Accumulator variables are created lazily on the first __call__,
        # once the gradient structure is known.
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulation steps performed so far (creates the counter lazily)."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """Current accumulated gradient values (``None`` entries preserved)."""
        if not self._gradients:
            raise ValueError("""The accumulator should be called first to initialize the gradients""" )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Add `gradients` (one per variable) into the accumulators."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        # Zero-init per gradient (the original zeroed the whole list).
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(F'Expected {len(self._gradients )} gradients, but got {len(gradients )}' )
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Zero all accumulators and the step counter; no-op before first call."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
'''simple docstring'''
import math
from collections.abc import Callable
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : float = xa
A_ : float = xa
while True:
if x_n == x_na or function(lowerCamelCase__ ) == function(lowerCamelCase__ ):
raise ZeroDivisionError("""float division by zero, could not find root""" )
A_ : float = x_na - (
function(lowerCamelCase__ ) / ((function(lowerCamelCase__ ) - function(lowerCamelCase__ )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
A_ : Tuple = x_na
A_ : List[Any] = x_na
def a ( lowerCamelCase__ ):
'''simple docstring'''
return math.pow(lowerCamelCase__ , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5)) | 667 | 1 |
'''simple docstring'''
def a ( lowerCamelCase__ ):
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(lowerCamelCase__ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod() | 667 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase :Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ):
super().__init__(**lowercase )
A_ : Dict = size if size is not None else {"""shortest_edge""": 224}
A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" )
A_ : str = do_resize
A_ : str = size
A_ : List[str] = resample
A_ : Any = do_center_crop
A_ : Union[str, Any] = crop_size
A_ : List[Any] = do_rescale
A_ : List[Any] = rescale_factor
A_ : Dict = do_normalize
A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Union[str, Any] = do_convert_rgb
def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : List[str] = do_resize if do_resize is not None else self.do_resize
A_ : int = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase )
A_ : int = resample if resample is not None else self.resample
A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase )
A_ : str = do_rescale if do_rescale is not None else self.do_rescale
A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
A_ : Any = image_mean if image_mean is not None else self.image_mean
A_ : Any = image_std if image_std is not None else self.image_std
A_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : List[str] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : int = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
A_ : int = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A_ : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A_ : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A_ : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 667 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase :List[str] = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :str = ['''MobileViTFeatureExtractor''']
lowerCamelCase :Union[str, Any] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[int] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Any = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
lowerCamelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 |
'''simple docstring'''
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase ):
A_ : List[str] = name
A_ : Dict = value
A_ : Optional[int] = weight
def __repr__(self ):
return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def _a (self ):
return self.value
def _a (self ):
return self.name
def _a (self ):
return self.weight
def _a (self ):
return self.value / self.weight
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = []
for i in range(len(lowerCamelCase__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = sorted(lowerCamelCase__ , key=lowerCamelCase__ , reverse=lowerCamelCase__ )
A_ : Any = []
A_, A_ : Tuple = 0.0, 0.0
for i in range(len(lowerCamelCase__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def a ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Any = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
def a ( *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
requires_backends(lowerCamelCase__ , ["""torch"""] )
def a ( *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
requires_backends(lowerCamelCase__ , ["""torch"""] )
def a ( *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
requires_backends(lowerCamelCase__ , ["""torch"""] )
def a ( *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
requires_backends(lowerCamelCase__ , ["""torch"""] )
def a ( *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
requires_backends(lowerCamelCase__ , ["""torch"""] )
def a ( *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
requires_backends(lowerCamelCase__ , ["""torch"""] )
def a ( *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
requires_backends(lowerCamelCase__ , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Any = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['torch']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
@classmethod
def _a (cls , *lowercase , **lowercase ):
requires_backends(cls , ["""torch"""] )
# --- Dummy placeholder objects for the `torch` backend -------------------------
# Each class below is a stand-in exposed when torch is not installed: both
# instantiation and the classmethods raise via `requires_backends`, telling the
# user to install torch.  `__UpperCAmelCase` is the DummyObject metaclass and
# `requires_backends` the checker, both defined earlier in this file.
# NOTE(review): the obfuscator collapsed every class name to `_lowerCAmelCase`
# (each definition shadows the previous one) and both classmethods to `_a`
# (the second silently overrides the first).  The upstream file gives every
# class and method a distinct name — restore them from upstream.
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
    # Backends that must be installed before this object may be used.
    __SCREAMING_SNAKE_CASE : Optional[int] = ['torch']
    def __init__(self , *lowercase , **lowercase ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : Tuple = ['torch']
    def __init__(self , *lowercase , **lowercase ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : int = ['torch']
    def __init__(self , *lowercase , **lowercase ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : Optional[int] = ['torch']
    def __init__(self , *lowercase , **lowercase ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : int = ['torch']
    def __init__(self , *lowercase , **lowercase ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : List[Any] = ['torch']
    def __init__(self , *lowercase , **lowercase ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : List[str] = ['torch']
    def __init__(self , *lowercase , **lowercase ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : Optional[Any] = ['torch']
    def __init__(self , *lowercase , **lowercase ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : List[Any] = ['torch']
    def __init__(self , *lowercase , **lowercase ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : Any = ['torch']
    def __init__(self , *lowercase , **lowercase ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : Any = ['torch']
    def __init__(self , *lowercase , **lowercase ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : int = ['torch']
    def __init__(self , *lowercase , **lowercase ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : Optional[Any] = ['torch']
    def __init__(self , *lowercase , **lowercase ):
        requires_backends(self , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
    @classmethod
    def _a (cls , *lowercase , **lowercase ):
        requires_backends(cls , ["""torch"""] )
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
# Module-level logging / pretty-printing configuration for quant_trainer.
# Review fix: the obfuscator had collapsed all three assignments onto a single
# name (`lowerCamelCase`), leaving `logger`, `name_width` and `qname_width` —
# which every function below reads — undefined.
logger = logging.getLogger(__name__)
name_width = 5_0   # max width of layer names
qname_width = 7_0  # max width of quantizer names
# Keep the old (obfuscated) name bound to the last assigned value, as before.
lowerCamelCase = qname_width
def a ( lowerCamelCase__ ):
    '''Register quant_trainer's command-line options on the given
    ``argparse.ArgumentParser``.

    Review fixes: the body referenced undefined names ``parser``/``group``
    instead of the parameter / the assigned local, and passed the parser
    object itself as the ``type=`` converter of several options.  The
    converters are restored from the option help texts (int precisions,
    float percentile/clip value, string keywords).
    '''
    parser = lowerCamelCase__  # keep the original parameter name for callers
    group = parser.add_argument_group("""quant_trainer arguments""" )
    group.add_argument("""--wprec""" , type=int , default=8 , help="""weight precision""" )
    group.add_argument("""--aprec""" , type=int , default=8 , help="""activation precision""" )
    group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
    group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
    group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
    group.add_argument("""--quant-disable-keyword""" , type=str , nargs="""+""" , help="""disable quantizers by keyword""" )
    group.add_argument("""--quant-disable-layer-module""" , type=str , help="""disable quantizers by keyword under layer.""" )
    group.add_argument("""--quant-enable-layer-module""" , type=str , help="""enable quantizers by keyword under layer""" )
    group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
    group.add_argument("""--percentile""" , default=None , type=float , help="""percentile for PercentileCalibrator""" )
    group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
    group.add_argument("""--clip-gelu""" , metavar="""N""" , type=float , help="""clip gelu output maximum value to N""" )
    group.add_argument(
        """--recalibrate-weights""" , action="""store_true""" , help=(
            """recalibrate weight amaxes by taking the max of the weights."""
            """ amaxes will be computed with the current quantization granularity (axis)."""
        ) , )
# Restore the upstream public name (the obfuscator renamed every function to `a`).
add_arguments = a
def a ( lowerCamelCase__ ):
    '''Install default QuantDescriptors on ``quant_nn.QuantLinear`` from the
    parsed CLI *args*.

    Raises ``ValueError`` for an unknown ``--calibrator`` or when the
    percentile calibrator is selected without ``--percentile``.
    Review fix: the computed calibration method was assigned to a throwaway
    name and the args namespace itself was passed as ``calib_method``.
    '''
    args = lowerCamelCase__  # keep the original parameter name for callers
    if args.calibrator == "max":
        calib_method = """max"""
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("""Specify --percentile when using percentile calibrator""" )
        calib_method = """histogram"""
    elif args.calibrator == "mse":
        calib_method = """histogram"""
    else:
        raise ValueError(f'Invalid calibrator {args.calibrator}' )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    # Weights are quantized per output channel (axis 0) unless per-tensor is requested.
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
# Restore the upstream public name (the obfuscator renamed every function to `a`).
set_default_quantizers = a
def a ( model , args , calib=False , eval=False ):  # noqa: A002 - `eval` kept for upstream API parity (unused)
    '''Configure *model* for quantization according to the parsed CLI *args*:
    optionally disable/enable quantizers by keyword or layer, recalibrate
    weights, fuse the q/k/v scale factors and clip GELU outputs, then print a
    summary.

    Review fix: the original signature repeated a single parameter name four
    times (a SyntaxError), and every boolean passed to the helpers was that
    duplicated parameter instead of True/False.  Parameter names are restored
    from the upstream signature.
    '''
    logger.info("""Configuring Model for Quantization""" )
    logger.info(f'using quantization package {pytorch_quantization.__file__}' )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ["""embeddings"""] , which="""weight""" , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [""""""] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
    if args.clip_gelu:
        clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
# Restore the upstream public name (the obfuscator renamed every function to `a`).
configure_model = a
def a ( lowerCamelCase__ ):
    '''Switch every TensorQuantizer in the model into calibration mode:
    quantization is disabled and the calibrator collects statistics; quantizers
    without a calibrator are disabled entirely.

    Review fix: the body iterated over an undefined name ``model`` instead of
    the parameter.
    '''
    model = lowerCamelCase__  # keep the original parameter name for callers
    logger.info("""Enabling Calibration""" )
    for name, module in model.named_modules():
        if name.endswith("""_quantizer""" ):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f'{name:80}: {module}' )
# Restore the upstream public name (the obfuscator renamed every function to `a`).
enable_calibration = a
def a ( model , args ):
    '''Finish calibration: load the collected amax values into every
    TensorQuantizer, re-enable quantization, and move the model back to GPU.

    Review fix: the original signature used the same name for both parameters
    (a SyntaxError); names restored from the upstream signature.
    '''
    logger.info("""Loading calibrated amax""" )
    for name, module in model.named_modules():
        if name.endswith("""_quantizer""" ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    # Histogram-based calibrators need the percentile from the CLI args.
                    module.load_calib_amax("""percentile""" , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model )
# Restore the upstream public name (the obfuscator renamed every function to `a`).
finish_calibration = a
def a ( model , args ):
    '''Use the same amax (scale factor) for the q/k/v input quantizers of every
    self-attention block, and — with per-tensor quantization — for the q/k/v
    weight quantizers too.

    Review fix: the outer and inner signatures each repeated one parameter name
    (SyntaxErrors), and the q/k/v/amax values were assigned to throwaway names
    while the code read the originals.
    '''
    def fusea(qq , qk , qv ):
        # Fuse three quantizers to the max of their amax values.
        for mod in [qq, qk, qv]:
            if not hasattr(mod , """_amax""" ):
                print("""          WARNING: NO AMAX BUFFER""" )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(f'          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
    for name, mod in model.named_modules():
        if name.endswith(""".attention.self""" ):
            logger.info(f'FUSE_QKV: {name:{name_width}}' )
            fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
# Restore the upstream public name (the obfuscator renamed every function to `a`).
fuse_qkv = a
def a ( model , maxval ):
    '''Clamp the input-quantizer amax of every non-attention ``.output.dense``
    module (the layer after the GELU) to at most *maxval*.

    Review fix: the original signature repeated one parameter name (a
    SyntaxError) and the before/after amax values were assigned to throwaway
    names while the log line read the originals.
    '''
    for name, mod in model.named_modules():
        if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
# Restore the upstream public name (the obfuscator renamed every function to `a`).
clip_gelu = a
def a ( lowerCamelCase__ ):
    '''Expand every per-axis weight-quantizer amax from a scalar to one value
    per output channel (the size of weight dim 0).

    Review fixes: the body iterated an undefined name ``model`` instead of the
    parameter, and the expanded tensor was assigned to a throwaway name
    instead of being stored back on the quantizer.
    '''
    model = lowerCamelCase__  # keep the original parameter name for callers
    for name, mod in model.named_modules():
        if hasattr(mod , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
            num_channels = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(num_channels , dtype=amax.dtype , device=amax.device ) * amax
            print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
# Restore the upstream public name (the obfuscator renamed every function to `a`).
expand_amax = a
def a ( lowerCamelCase__ ):
    '''Recompute every weight-quantizer amax from the current weights, reducing
    over all axes except the quantization axis.

    Review fixes: the body iterated an undefined ``model``; the amax check
    used ``mod.weight_quantizer`` (missing underscore); the warning print was
    missing its f-prefix; ``keepdims`` received the model instead of True; and
    the recomputed amax was assigned to a throwaway name instead of the
    quantizer.
    '''
    model = lowerCamelCase__  # keep the original parameter name for callers
    for name, mod in model.named_modules():
        if hasattr(mod , """_weight_quantizer""" ):
            if not hasattr(mod._weight_quantizer , """_amax""" ):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
            mod._weight_quantizer._amax = amax
# Restore the upstream public name (the obfuscator renamed every function to `a`).
recalibrate_weights = a
def a ( model , name_width=25 , line_width=1_80 , ignore=None ):
    '''Log one summary line (module name + input/weight quantizer reprs) per
    module that has a weight, skipping anything whose type or name matches
    *ignore*.

    Review fix: the original signature repeated one parameter name four times
    (a SyntaxError) and several isinstance/type checks were applied to the
    duplicated parameter instead of the intended value.  Note: ``name_width``
    is immediately recomputed from the longest module name — upstream
    behaviour, preserved here.
    '''
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0  # recomputed below from the longest weighted-module name
    for name, mod in model.named_modules():
        if not hasattr(mod , """weight""" ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , """_input_quantizer""" , None )
        weight_q = getattr(mod , """_weight_quantizer""" , None )
        if not hasattr(mod , """weight""" ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = f'Act:{input_q.extra_repr()}'
        wgt_str = f'Wgt:{weight_q.extra_repr()}'
        line = f'{name:{name_width}} {act_str} {wgt_str}'
        if len(line ) <= line_width:
            logger.info(line )
        else:
            # Too long for one line: split the activation and weight parts.
            logger.info(f'{name:{name_width}} {act_str}' )
            logger.info(f'{" ":{name_width}} {wgt_str}' )
# Restore the upstream public name (the obfuscator renamed every function to `a`).
print_model_summary = a
def a ( lowerCamelCase__ ):
    '''Print every TensorQuantizer found in the model, plus a total count.

    Review fix: the body iterated an undefined ``model`` and the counter was
    assigned to a throwaway name while ``count`` was incremented/printed.
    '''
    model = lowerCamelCase__  # keep the original parameter name for callers
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(f'{name:80} {mod}' )
            count += 1
    print(f'{count} TensorQuantizers found in model' )
# Restore the upstream public name (the obfuscator renamed every function to `a`).
print_quant_summary = a
def a ( name , mod , quantizer , k , v ):
    '''Set attribute *k* of the quantizer attribute *quantizer* (e.g.
    ``"_input_quantizer"``) on *mod* to *v*; warn if *mod* has no such
    quantizer.  *name* is only used for the warning message.

    Review fix: the original signature repeated a single parameter name five
    times (a SyntaxError); names restored from the upstream signature.
    '''
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(f'{name} has no {quantizer}' )
# Restore the upstream public name (the obfuscator renamed every function to `a`).
set_quantizer = a
def a ( name , mod , which="both" , **kwargs ):
    '''Apply each keyword in *kwargs* to the input and/or weight quantizer of
    *mod* (selected by *which*: "input", "weight" or "both") via
    ``set_quantizer``, then log a summary of the changes.

    Review fix: the original signature repeated a parameter name (a
    SyntaxError) and passed the duplicated parameter to ``set_quantizer``
    instead of each (k, v) pair.
    '''
    s = f'Warning: changing {which} quantizers of {name:{qname_width}}'
    for k, v in kwargs.items():
        s += f' {k}={v}'
        if which in ["input", "both"]:
            set_quantizer(name , mod , """_input_quantizer""" , k , v )
        if which in ["weight", "both"]:
            set_quantizer(name , mod , """_weight_quantizer""" , k , v )
    logger.info(s )
# Restore the upstream public name (the obfuscator renamed every function to `a`).
set_quantizers = a
def a ( model , names , **kwargs ):
    '''Apply **kwargs to every quantizer whose module name matches one of the
    regular expressions in *names*: modules owning quantizers go through
    ``set_quantizers``; bare ``*_quantizer`` modules get the attributes set
    directly.

    Review fix: the original signature repeated a parameter name (a
    SyntaxError) and the regex/search arguments and setattr targets all used
    the duplicated parameter instead of the intended values.
    '''
    for name, mod in model.named_modules():
        if hasattr(mod , """_input_quantizer""" ) or hasattr(mod , """_weight_quantizer""" ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith("""_quantizer""" ):
            for n in names:
                if re.search(n , name ):
                    s = f'Warning: changing {name:{name_width}}'
                    for k, v in kwargs.items():
                        s += f' {k}={v}'
                        setattr(mod , k , v )
                    logger.info(s )
# Restore the upstream public name (the obfuscator renamed every function to `a`).
set_quantizer_by_name = a
'''simple docstring'''
# Lazy-import package initialiser for the speech_to_text model family.
# Review fixes: the import-structure dict and all four conditional additions
# were assigned to a throwaway `lowerCamelCase` name, so the `_import_structure`
# consumed by `_LazyModule` at the bottom was undefined, and the lazy module
# object was never installed into `sys.modules`.
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)

# Submodule -> exported names; extended below as optional backends are found.
_import_structure = {
    '''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
    '''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_speech_to_text'''] = ['''Speech2TextTokenizer''']
try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''feature_extraction_speech_to_text'''] = ['''Speech2TextFeatureExtractor''']
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_speech_to_text'''] = [
        '''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFSpeech2TextForConditionalGeneration''',
        '''TFSpeech2TextModel''',
        '''TFSpeech2TextPreTrainedModel''',
    ]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_speech_to_text'''] = [
        '''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Speech2TextForConditionalGeneration''',
        '''Speech2TextModel''',
        '''Speech2TextPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # NOTE(review): these `SpeechaText*` identifiers look obfuscation-renamed
    # (`2` -> `a`); kept as-is because the sibling submodules are not visible
    # here — confirm against the actual submodule definitions.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer
    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : List[Any] = 0
@slow
def _a (self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _a (self ):
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
# Check that tokenizer_type ≠ model_type
A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
@require_tokenizers
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
with pytest.raises(lowercase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase , lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _a (self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
A_ : List[str] = TOKENIZER_MAPPING.values()
A_ : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase )
@require_tokenizers
def _a (self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase )
@require_tokenizers
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase )
A_ : List[Any] = """Hello, world. How are you?"""
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase )
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _a (self ):
A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase ) , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def _a (self ):
A_ : Any = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
# Check we can load the tokenizer config of an online model.
A_ : Tuple = get_tokenizer_config("""bert-base-cased""" )
A_ : Any = config.pop("""_commit_hash""" , lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ : List[Any] = get_tokenizer_config(lowercase )
self.assertDictEqual(lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ : int = AutoTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Dict = get_tokenizer_config(lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
A_ : Tuple = CustomTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
# Can register in two steps
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : str = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase ):
A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase ):
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def _a (self ):
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = False
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = NewTokenizer
__SCREAMING_SNAKE_CASE : Optional[Any] = False
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# If remote code is not set, the default is to use local
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
A_ : Dict = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
    def _a (self ):
        """Loading with a nonexistent git revision must fail with a helpful error."""
        # NOTE(review): `lowercase` below is an unresolved mangled reference —
        # it stood for the expected exception type in the context manager and
        # for a module-level dummy model-id constant in the `from_pretrained`
        # call. Both must be restored before this test can run.
        with self.assertRaisesRegex(
            lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" )
def _a (self ):
# Make sure we have cached the tokenizer.
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 667 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
lowerCamelCase :Dict = 8
def decimal_to_bits(x, bits=None):
    """Convert a float image tensor in [0, 1] to a bit tensor in {-1, 1}.

    Args:
        x: tensor of shape (b, c, h, w) with values in [0, 1].
        bits: bits per channel; defaults to the module-level ``BITS`` (8).

    Returns:
        Tensor of shape (b, c * bits, h, w) whose entries are -1.0 or 1.0.
    """
    if bits is None:
        bits = BITS
    x = (x * 255).int().clamp(0, 255)
    # Bit weights 2^(bits-1) ... 2^0, broadcast along a new "bit" axis.
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=x.device)
    mask = mask.view(-1, 1, 1)            # equivalent of rearrange "d -> d 1 1"
    x = x.unsqueeze(2)                    # "b c h w -> b c 1 h w"
    out = ((x & mask) != 0).float()       # extract each bit as 0/1
    b, c, d, h, w = out.shape
    out = out.reshape(b, c * d, h, w)     # "b c d h w -> b (c d) h w"
    return out * 2 - 1                    # map {0, 1} -> {-1, 1}
def bits_to_decimal(x, bits=None):
    """Convert a bit tensor in {-1, 1} back to a float image tensor in [0, 1].

    Inverse of ``decimal_to_bits``: positive entries are read as a 1 bit.

    Args:
        x: tensor of shape (b, c * bits, h, w).
        bits: bits per channel; defaults to the module-level ``BITS`` (8).
    """
    if bits is None:
        bits = BITS
    x = (x > 0).int()
    # Bit weights; int32 fixes the mangled `torch.intaa` dtype of the original.
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=x.device, dtype=torch.int32)
    mask = mask.view(-1, 1, 1)                 # "d -> d 1 1"
    b, cd, h, w = x.shape
    x = x.view(b, cd // bits, bits, h, w)      # "b (c d) h w -> b c d h w"
    dec = (x * mask).sum(dim=2)                # weighted sum over the bit axis
    return (dec / 255).clamp(0.0, 1.0)
def ddim_bit_scheduler_step(
    self,
    model_output,
    timestep,
    sample,
    eta=0.0,
    use_clipped_model_output=True,
    generator=None,
    return_dict=True,
):
    """DDIM step that clamps the predicted x_0 to +/- ``self.bit_scale``
    instead of +/- 1, for use as a monkey-patched ``scheduler.step``.

    Args:
        self: a DDIMScheduler instance (with ``bit_scale`` attached).
        model_output: predicted noise e_theta(x_t, t).
        timestep: current discrete timestep.
        sample: current noisy sample x_t.
        eta: weight of the added noise (0 -> deterministic DDIM).
        use_clipped_model_output: re-derive the noise from the clipped x_0.
        generator: torch RNG for the optional noise term.
        return_dict: return a ``DDIMSchedulerOutput`` instead of a tuple.
    """
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample ("predicted x_0", formula (12))
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0" to the bit range instead of [-1, 1]
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: sigma_t(eta), formula (16)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t", formula (12)
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t-1 without random noise, formula (12)
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output,
    timestep,
    sample,
    prediction_type="epsilon",
    generator=None,
    return_dict=True,
):
    """DDPM step that clamps the predicted x_0 to +/- ``self.bit_scale``
    instead of +/- 1, for use as a monkey-patched ``scheduler.step``.

    Args:
        self: a DDPMScheduler instance (with ``bit_scale`` attached).
        model_output: predicted noise (or sample, per ``prediction_type``).
        timestep: current discrete timestep.
        sample: current noisy sample x_t.
        prediction_type: "epsilon" (predict noise) or "sample" (predict x_0).
        generator: torch RNG for the added noise (t > 0 only).
        return_dict: return a ``DDPMSchedulerOutput`` instead of a tuple.

    Raises:
        ValueError: for an unsupported ``prediction_type``.
    """
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample ("predicted x_0", formula (15)
    # of https://arxiv.org/pdf/2006.11239.pdf)
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f'Unsupported prediction_type {prediction_type}.')

    # 3. Clip "predicted x_0" to the bit range instead of [-1, 1]
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for x_0 and the current sample x_t, formula (7)
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample mu_t, formula (7)
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise (only for t > 0)
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class _lowerCAmelCase ( DiffusionPipeline ):
    """Bit Diffusion pipeline: runs the diffusion process in a binary (+/-1)
    bit representation of the image and decodes the result back to [0, 1]."""

    def __init__(self, unet, scheduler, bit_scale=1.0):
        """
        Args:
            unet: the denoising UNet model.
            scheduler: a DDIM or DDPM scheduler.
            bit_scale: scale applied to the bit representation; the patched
                step functions clamp predicted x_0 to +/- this value.
        """
        super().__init__()
        self.bit_scale = bit_scale
        # Patch the scheduler's step so that x_0 is clamped to +/- bit_scale.
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height=256,
        width=256,
        num_inference_steps=50,
        generator=None,
        batch_size=1,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ):
        """Sample ``batch_size`` images of size (height, width).

        Returns an ``ImagePipelineOutput`` (or a 1-tuple if return_dict=False).
        """
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        # Work in the +/- bit_scale bit space.
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        # Decode the bit representation back to pixel values in [0, 1].
        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
'''simple docstring'''
from __future__ import annotations
def a(number_of_bytes, partitions):
    """Split ``number_of_bytes`` into ``partitions`` contiguous 1-indexed byte ranges.

    The last partition absorbs any remainder so the ranges always cover
    exactly ``number_of_bytes`` bytes.

    >>> a(16647, 4)
    ['1-4161', '4162-8322', '8323-12483', '12484-16647']

    Raises:
        ValueError: if ``partitions`` is not in [1, number_of_bytes].
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        # Last partition runs to the end; earlier ones get an equal share.
        end_bytes = number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        allocation_list.append(f'{start_bytes}-{end_bytes}')
    return allocation_list
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
'''simple docstring'''
import logging
from transformers import PretrainedConfig
# Module-level logger.
lowerCamelCase :Optional[int] = logging.getLogger(__name__)
# NOTE(review): this rebinds the same mangled name as the logger above,
# clobbering it — these were originally two distinct module names (a logger
# and a pretrained-config archive map). Restore distinct names before use.
# Map of pretrained model ids to their hosted config.json files.
lowerCamelCase :List[str] = {
    '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _lowerCAmelCase ( PretrainedConfig ):
    """Configuration for the BertAbs abstractive summarization model.

    Stores encoder (``enc_*``) and decoder (``dec_*``) hyper-parameters;
    defaults match the released checkpoint.
    """

    # Identifier used by the transformers config registry.
    model_type = 'bertabs'

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        """All arguments are stored verbatim as attributes; extra keyword
        arguments are forwarded to ``PretrainedConfig``."""
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
# Print conversion progress at INFO level.
logging.set_verbosity_info()
lowerCamelCase :Any = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    """Build (old_key, new_key) pairs mapping the original DeiT encoder
    checkpoint names to the HuggingFace ViT encoder names.

    Args:
        encoder_config: ViT config (only ``num_hidden_layers`` is read).
        decoder_config: TrOCR decoder config (unused here, kept for symmetry).

    Returns:
        List of (source_key, destination_key) tuples.
    """
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight')
        )
        rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias'))
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight')
        )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias')
        )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight')
        )
        rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias'))
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight')
        )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias')
        )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight')
        )
        rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias'))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ]
    )

    return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    """Split each fused DeiT qkv projection into separate query/key/value
    weight entries, modifying ``state_dict`` in place.

    Args:
        state_dict: checkpoint dict containing ``...attn.qkv.weight`` keys.
        encoder_config: config providing ``num_hidden_layers`` and ``hidden_size``.
    """
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight')

        state_dict[f'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    """Move the value stored under key ``old`` to key ``new`` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    """Download a sample image matching the checkpoint's domain.

    Handwritten checkpoints get an IAM line image; printed/stage1 checkpoints
    get an SROIE receipt image. Returns a PIL RGB image.
    """
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original TrOCR checkpoint's weights into our
    VisionEncoderDecoderModel structure, verify the logits on a sample image,
    and save model + processor to ``pytorch_dump_folder_path``.
    """
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving processor to {pytorch_dump_folder_path}')
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: the mangled source bound the parser and parsed args to a
    # throwaway module name while the call below read `args`; restore the names.
    lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
    parser = lowerCamelCase
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
        type=str,
        help='''URL to the original PyTorch checkpoint (.pth file).''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
'''simple docstring'''
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    """Assert ``dataset`` is a 4-row, 3-column Dataset whose feature dtypes
    match ``expected_features`` (a feature-name -> dtype-string mapping)."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """Read a SQLite table into a Dataset, optionally keeping it in memory,
    and check both the memory behaviour and the resulting dataset."""
    # NOTE(review): parameter names were lost in mangling; from the body they
    # are the parametrized `keep_in_memory` plus the pytest fixtures
    # `tmp_path` and `sqlite_path` (the fourth slot is an unidentified
    # fixture). Restore them before running.
    A_ : str = tmp_path / """cache"""
    A_ : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        A_ : List[str] = SqlDatasetReader(
            """dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ ).read()
    _check_sql_dataset(lowerCamelCase__ , lowerCamelCase__ )
@require_sqlalchemy
@pytest.mark.parametrize(
    """features""" , [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ] , )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """Read a SQLite table with an explicit (possibly casting) Features schema
    and check the resulting dataset matches the requested dtypes."""
    # NOTE(review): parameter names were lost in mangling; from the body they
    # are the parametrized `features` plus the pytest fixtures `tmp_path` and
    # `sqlite_path` (the fourth slot is an unidentified fixture). The
    # `lowerCamelCase__` references inside the body are likewise unresolved
    # (dtype, features, cache_dir, dataset, expected_features). Restore before use.
    A_ : Union[str, Any] = tmp_path / """cache"""
    A_ : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    A_ : Dict = features.copy() if features else default_expected_features
    A_ : Tuple = (
        Features({feature: Value(lowerCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    A_ : Any = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
    _check_sql_dataset(lowerCamelCase__ , lowerCamelCase__ )
def iter_sql_file(sqlite_path):
    """Yield every row of the ``dataset`` table in the SQLite database at
    ``sqlite_path``."""
    # stdlib sqlite3; the module-level `sqlitea` import is a mangled alias of it.
    import sqlite3

    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """Round-trip: read a SQLite table, write it back (num_proc=1) and check
    the output rows equal the input rows."""
    # NOTE(review): parameter names were lost; from the body these are the
    # pytest fixtures `sqlite_path` and `tmp_path` (third slot unidentified).
    # Also note the mangled `for rowa, rowa in zip(...)` binds the same name
    # twice, making the assert vacuous — originally two distinct row names.
    A_ : Any = tmp_path / """cache"""
    A_ : Tuple = os.path.join(lowerCamelCase__ , """tmp.sql""" )
    A_ : Optional[int] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowerCamelCase__ ).read()
    SqlDatasetWriter(lowerCamelCase__ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
    A_ : Union[str, Any] = iter_sql_file(lowerCamelCase__ )
    A_ : Dict = iter_sql_file(lowerCamelCase__ )
    for rowa, rowa in zip(lowerCamelCase__ , lowerCamelCase__ ):
        assert rowa == rowa
@require_sqlalchemy
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """Round-trip with multiprocessing: read a SQLite table, write it back
    with num_proc=2 and check the output rows equal the input rows."""
    # NOTE(review): same mangling issues as the single-process variant above —
    # lost fixture parameter names and a duplicated loop-target name that makes
    # the final assert vacuous. Restore before use.
    A_ : str = tmp_path / """cache"""
    A_ : Optional[Any] = os.path.join(lowerCamelCase__ , """tmp.sql""" )
    A_ : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowerCamelCase__ ).read()
    SqlDatasetWriter(lowerCamelCase__ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
    A_ : Tuple = iter_sql_file(lowerCamelCase__ )
    A_ : List[Any] = iter_sql_file(lowerCamelCase__ )
    for rowa, rowa in zip(lowerCamelCase__ , lowerCamelCase__ ):
        assert rowa == rowa
@require_sqlalchemy
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """Writing with an invalid num_proc=0 must raise."""
    # NOTE(review): parameter names were lost; from the body these are the
    # pytest fixtures `sqlite_path` and `tmp_path` (third slot unidentified),
    # and the expected exception type in `pytest.raises` was mangled away
    # (originally ValueError). Restore before use.
    A_ : Optional[Any] = tmp_path / """cache"""
    A_ : Any = os.path.join(lowerCamelCase__ , """tmp.sql""" )
    A_ : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowerCamelCase__ ).read()
    with pytest.raises(lowerCamelCase__ ):
        SqlDatasetWriter(lowerCamelCase__ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
'''simple docstring'''
# Quine-style one-liner: the lambda substitutes the template string into
# itself via %r (repr) and %% (escaped percent), printing self-referential code.
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Optional[int] = tempfile.mkdtemp()
# fmt: off
A_ : str = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
A_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
A_ : Union[str, Any] = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
A_ : Dict = os.path.join(self.tmpdirname , lowercase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowercase , lowercase )
def _a (self , **lowercase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def _a (self , **lowercase ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase )
def _a (self ):
shutil.rmtree(self.tmpdirname )
def _a (self ):
A_ : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Optional[int] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ):
A_ : str = self.get_tokenizer()
A_ : Any = self.get_image_processor()
A_ : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=lowercase , image_processor=lowercase )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def _a (self ):
A_ : int = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : List[str] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A_ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def _a (self ):
A_ : Tuple = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Tuple = VisionTextDualEncoderProcessor(tokenizer=lowercase , image_processor=lowercase )
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Optional[Any] = image_processor(lowercase , return_tensors="""np""" )
A_ : Any = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : int = self.get_tokenizer()
A_ : int = VisionTextDualEncoderProcessor(tokenizer=lowercase , image_processor=lowercase )
A_ : Union[str, Any] = """lower newer"""
A_ : List[str] = processor(text=lowercase )
A_ : Tuple = tokenizer(lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a (self ):
A_ : str = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=lowercase , image_processor=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : int = self.prepare_image_inputs()
A_ : Any = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(lowercase ):
processor()
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Tuple = VisionTextDualEncoderProcessor(tokenizer=lowercase , image_processor=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[Any] = processor.batch_decode(lowercase )
A_ : Optional[Any] = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : List[str] = VisionTextDualEncoderProcessor(tokenizer=lowercase , image_processor=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : Dict = self.prepare_image_inputs()
A_ : Optional[int] = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 667 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared test fixtures. The mangled source bound both values to the SAME
# module name (the second assignment clobbered the first); they are two
# distinct images: the color test image and its grayscale conversion.
img = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
gray = cvtColor(img, COLOR_BGR2GRAY)
def a ( ):
    """Smoke test: converting the shared test image to a negative yields a
    non-empty array."""
    # NOTE(review): `lowerCamelCase__` is an unresolved mangled reference to
    # the module-level color test image, and the result is bound to `A_` while
    # the assert reads `negative_img` — both must be restored before running.
    A_ : List[Any] = cn.convert_to_negative(lowerCamelCase__ )
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    """change_contrast returns a PIL RGB image (checked via its repr)."""
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )
def test_gen_gaussian_kernel():
    """A 9x9 Gaussian kernel with sigma=1.4 contains only non-zero weights."""
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    """Canny edge detection on the grayscale test image yields edges."""
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def a ( ):
    """Smoke test: Gaussian filtering the shared grayscale image yields a
    non-empty result."""
    # NOTE(review): `lowerCamelCase__` is an unresolved mangled reference to
    # the module-level grayscale test image — restore before running.
    assert gg.gaussian_filter(lowerCamelCase__ , 5 , sigma=0.9 ).all()
def a ( ):
    """Smoke test: convolving the shared grayscale image with a Laplacian
    kernel yields a non-empty result."""
    # NOTE(review): the `lowerCamelCase__` references are unresolved mangled
    # names (the grayscale image, the local kernel, and the uint8 dtype), and
    # the result is bound to `A_` while the assert reads `res` — restore all
    # of these before running.
    A_ : int = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    A_ : Optional[Any] = conv.img_convolve(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ )
    assert res.any()
def a ( ):
    """Smoke test: median-filtering the shared grayscale image yields a
    non-empty result."""
    # NOTE(review): `lowerCamelCase__` is an unresolved mangled reference to
    # the module-level grayscale test image — restore before running.
    assert med.median_filter(lowerCamelCase__ , 3 ).any()
def a ( ):
    """Smoke test: Sobel filtering the shared grayscale image yields
    non-empty gradient and direction arrays."""
    # NOTE(review): `lowerCamelCase__` is an unresolved mangled reference to
    # the module-level grayscale image, and the results are bound to `A_`
    # while the assert reads `grad`/`theta` — restore before running.
    A_, A_ : int = sob.sobel_filter(lowerCamelCase__ )
    assert grad.any() and theta.any()
def a ( ):
    """Smoke test: applying a sepia tone to the shared color image yields a
    non-empty result."""
    # NOTE(review): `lowerCamelCase__` is an unresolved mangled reference to
    # the module-level color image, and the result is bound to `A_` while the
    # assert reads `sepia` — restore before running.
    A_ : int = sp.make_sepia(lowerCamelCase__ , 20 )
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    """Burkes dithering runs end to end and produces a non-empty output image."""
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    """Nearest-neighbour resize to 400x200 produces a non-empty output."""
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    """Exercise get_neighbors_pixel and build a full LBP image."""
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
# Conventional alias for the module logger (the obfuscated binding is kept above for compatibility).
logger = lowerCamelCase
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
    """Zero-shot image classification pipeline.

    Pairs an image with one hypothesis sentence per candidate label and softmaxes
    the model's image-text logits to score each label.

    NOTE(review): the four pipeline hooks below were all renamed to ``_a`` by an
    automated pass (each definition shadows the previous one) and ``__UpperCAmelCase``
    in the decorator/base is undefined in this file; the names are kept unchanged to
    preserve the existing interface, but the base ``Pipeline`` machinery expects them
    as ``_sanitize_parameters`` / ``preprocess`` / ``_forward`` / ``postprocess`` — confirm.

    Fixes applied inside the methods: duplicate parameter names (SyntaxErrors),
    ``hypothesis_template.format`` called without the loop variable, candidate
    labels / text inputs never stored in the preprocess output that ``_forward``
    pops, a duplicated ``**`` expansion in the model call, and a lambda whose body
    read an undefined name.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, """vision""")
        # Validate the loaded model against the TF or PT zero-shot mapping.
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)

    def __call__(self, images, **kwargs):
        # Delegate to the base pipeline call machinery.
        return super().__call__(images, **kwargs)

    def _a(self, **kwargs):
        # _sanitize_parameters hook: route call-time kwargs to the preprocess step.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["""candidate_labels"""] = kwargs["""candidate_labels"""]
        if "hypothesis_template" in kwargs:
            preprocess_params["""hypothesis_template"""] = kwargs["""hypothesis_template"""]
        return preprocess_params, {}, {}

    def _a(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        # preprocess hook: tokenize one hypothesis per label alongside the image features.
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _a(self, model_inputs):
        # _forward hook: run the model on image + text features.
        candidate_labels = model_inputs.pop("""candidate_labels""")
        text_inputs = model_inputs.pop("""text_inputs""")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            """candidate_labels""": candidate_labels,
            """logits""": outputs.logits_per_image,
        }
        return model_outputs

    def _a(self, model_outputs):
        # postprocess hook: softmax the logits and rank labels by descending score.
        candidate_labels = model_outputs.pop("""candidate_labels""")
        logits = model_outputs["""logits"""][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'Unsupported framework: {self.framework}')
        result = [
            {"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowerCamelCase :Dict = get_logger(__name__)
# Conventional alias for the module logger (the obfuscated binding is kept above for compatibility).
logger = lowerCamelCase
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=None ):
A_ : Optional[int] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , lowercase , getattr(lowercase , lowercase ) )
A_ : List[Any] = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module
class _lowerCAmelCase :
    """Context manager that patches an attribute reachable through submodules.

    ``_lowerCAmelCase(obj, "os.path.join", new)`` replaces every reference to
    ``os`` / ``os.path`` found in ``obj``'s globals with ``_PatchedModuleObj``
    proxies carrying ``join = new``, and also patches directly-imported
    attributes (``from os.path import join``). ``__exit__`` restores everything.

    Fixes: ``__init__`` previously bound locals instead of ``self.*`` (every
    method reads ``self.obj`` / ``self.target`` / ...), its duplicate-parameter
    signature was a SyntaxError, ``__enter__`` lost the ``*submodules,
    target_attr`` unpacking, and several references pointed at undefined names.
    """

    # NOTE(review): automated renaming left this attribute as __SCREAMING_SNAKE_CASE
    # while the helpers below read self._active_patches; both names are kept.
    __SCREAMING_SNAKE_CASE : Dict = []
    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        # obj:    object (usually a module) whose globals will be patched
        # target: dotted path such as "os.path.join"
        # new:    replacement object
        # attrs:  attribute names to always copy onto _PatchedModuleObj proxies
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(""".""")[0]
        self.original = {}  # attr name -> original value, restored in __exit__
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(""".""")
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(""".""".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(""".""".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it'a s builtin like "open"
            self.original[target_attr] = globals()["""__builtins__"""][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.')

    def __exit__(self, *exc_info):
        # Restore every patched attribute to its pre-__enter__ value.
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def _a(self):
        """Activate the patch outside a with-statement (the original ``start``)."""
        self.__enter__()
        self._active_patches.append(self)

    def _a(self):  # noqa: F811 -- shadows the starter above; name kept to preserve the obfuscated interface
        """Deactivate a previously started patch (the original ``stop``)."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
'''simple docstring'''
from collections import defaultdict
def a ( first_str, second_str ):
    """Return True if the two strings are anagrams, ignoring case and spaces.

    Fixes: the duplicate parameter names (a SyntaxError) are restored to the
    ``first_str`` / ``second_str`` the body already reads, the counter is bound
    to the ``count`` name the loop reads, and the defaultdict factory is ``int``
    (matching the ``defaultdict[str, int]`` annotation).
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(""" """, """""")
    second_str = second_str.replace(""" """, """""")
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)
    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Read the two strings to compare from stdin.
    # Fixes: the inputs/result were bound to a shadowed throwaway name while the
    # lines below read input_a/input_b/status, and the call targeted an undefined
    # name (the checker in this file is defined as `a`).
    input_a = input('''Enter the first string ''').strip()
    input_b = input('''Enter the second string ''').strip()
    status = a(input_a, input_b)
    print(F"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
"""Lazy import structure for the Wav2Vec2 model family: configuration, tokenizer,
feature extractor, processor, and the PyTorch / TensorFlow / Flax models.

NOTE(review): automated renaming appears to have damaged this module — the three
backend-specific name lists below are bound to a fresh module-level variable
instead of being registered under ``_import_structure[...]`` (so the lazy module
created at the bottom never exposes them), and the Flax ``TYPE_CHECKING`` branch
imports from the TensorFlow module. Flagged inline; code left unchanged.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Members that are always importable, keyed by submodule name.
lowerCamelCase :int = {
    '''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
    '''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
    '''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
    '''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): should presumably be _import_structure["modeling_..."] = [...]
    # rather than a fresh binding — confirm against the canonical lazy-import pattern.
    lowerCamelCase :Optional[int] = [
        '''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Wav2Vec2ForAudioFrameClassification''',
        '''Wav2Vec2ForCTC''',
        '''Wav2Vec2ForMaskedLM''',
        '''Wav2Vec2ForPreTraining''',
        '''Wav2Vec2ForSequenceClassification''',
        '''Wav2Vec2ForXVector''',
        '''Wav2Vec2Model''',
        '''Wav2Vec2PreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): same fresh-binding issue as the torch list above.
    lowerCamelCase :List[Any] = [
        '''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFWav2Vec2ForCTC''',
        '''TFWav2Vec2Model''',
        '''TFWav2Vec2PreTrainedModel''',
        '''TFWav2Vec2ForSequenceClassification''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): same fresh-binding issue as the torch list above.
    lowerCamelCase :Any = [
        '''FlaxWav2Vec2ForCTC''',
        '''FlaxWav2Vec2ForPreTraining''',
        '''FlaxWav2Vec2Model''',
        '''FlaxWav2Vec2PreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
    from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
    from .processing_wavaveca import WavaVecaProcessor
    from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavaveca import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavaVecaForAudioFrameClassification,
            WavaVecaForCTC,
            WavaVecaForMaskedLM,
            WavaVecaForPreTraining,
            WavaVecaForSequenceClassification,
            WavaVecaForXVector,
            WavaVecaModel,
            WavaVecaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wavaveca import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWavaVecaForCTC,
            TFWavaVecaForSequenceClassification,
            TFWavaVecaModel,
            TFWavaVecaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # NOTE(review): Flax names imported from the *TF* module — almost certainly
        # should be .modeling_flax_wavaveca. Confirm the actual module filename.
        from .modeling_tf_wavaveca import (
            FlaxWavaVecaForCTC,
            FlaxWavaVecaForPreTraining,
            FlaxWavaVecaModel,
            FlaxWavaVecaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy loader that imports submodules on first access.
    lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

# Fixes: all seven constants below were collapsed onto one repeatedly-shadowed
# module name, while the decorators/tests below read them by these names.
REQUIRE_FAIRSEQ = {'''comet'''}
_has_fairseq = importlib.util.find_spec('''fairseq''') is not None

UNSUPPORTED_ON_WINDOWS = {'''code_eval'''}
_on_windows = os.name == '''nt'''

REQUIRE_TRANSFORMERS = {'''bertscore''', '''frugalscore''', '''perplexity'''}
_has_transformers = importlib.util.find_spec('''transformers''') is not None

# Preserve the final value of the obfuscated module binding for compatibility.
lowerCamelCase = _has_transformers
def a ( test_case ):
    """Decorator: skip the wrapped metric test when it needs fairseq and fairseq is absent.

    Fixes: the outer parameter is restored to ``test_case`` and the inner one to
    ``metric_name`` — both names are read in the body but were shadowed by the
    obfuscated ``lowerCamelCase__``.
    """
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest("""\"test requires Fairseq\"""")
        else:
            test_case(self, metric_name)

    return wrapper
def a ( test_case ):
    """Decorator: skip the wrapped metric test when it needs transformers and it is absent.

    Fixes: parameter names restored to the ``test_case`` / ``metric_name`` the
    body reads (previously shadowed by the obfuscated ``lowerCamelCase__``).
    """
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest("""\"test requires transformers\"""")
        else:
            test_case(self, metric_name)

    return wrapper
def a ( test_case ):
    """Decorator: skip the wrapped metric test on Windows when the metric is unsupported there.

    Fixes: parameter names restored to the ``test_case`` / ``metric_name`` the
    body reads (previously shadowed by the obfuscated ``lowerCamelCase__``).
    """
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest("""\"test not supported on Windows\"""")
        else:
            test_case(self, metric_name)

    return wrapper
def a ( ):
    """List local metric directory names under ./metrics, excluding the unfinished "gleu".

    Fix: the directory list was bound to a throwaway name while the return line
    read the unbound name ``metrics``.
    """
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("""./metrics/*/""")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
# NOTE(review): `get_local_metric_names` and `__UpperCAmelCase` referenced by these
# decorators are not defined under those names in this file (automated renaming) —
# the intended targets appear to be the name-listing helper and the three skip
# decorators defined above. Confirm before running.
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@local
class _lowerCAmelCase ( parameterized.TestCase ):
    """Parameterized doctest runner for the local metric scripts under ./metrics.

    NOTE(review): later code refers to this class as ``LocalMetricTest`` and to its
    helpers as ``patch_intensive_calls`` / ``use_local_metrics`` /
    ``register_intensive_calls_patcher``; here all helper methods are renamed to
    ``_a`` (each definition shadowing the previous one), so those call sites cannot
    resolve. Code reproduced unchanged; documentation only.
    """

    # Registry of per-metric patchers and the current metric name (names mangled).
    __SCREAMING_SNAKE_CASE : Dict = {}
    __SCREAMING_SNAKE_CASE : Tuple = None

    @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
    @pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
    def _a (self , lowercase ):
        # Run the metric module's doctests with expensive model calls patched out.
        # NOTE(review): the assignments below bind a throwaway name while later lines
        # read metric_module / metric / parameters / results — renaming damage.
        A_ : Dict = """[...]"""
        A_ : Dict = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("""metrics""" , lowercase ) ).module_path )
        A_ : Dict = datasets.load.import_main_class(metric_module.__name__ , dataset=lowercase )
        # check parameters
        A_ : List[str] = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
        # run doctest
        with self.patch_intensive_calls(lowercase , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    A_ : List[str] = doctest.testmod(lowercase , verbose=lowercase , raise_on_error=lowercase )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1] # raise the exception that doctest caught
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )

    @slow
    def _a (self , lowercase ):
        # Slow variant: run the real doctests without patching intensive calls.
        A_ : int = """[...]"""
        A_ : str = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("""metrics""" , lowercase ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            A_ : str = doctest.testmod(lowercase , verbose=lowercase , raise_on_error=lowercase )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )

    @contextmanager
    def _a (self , lowercase , lowercase ):
        # Apply the registered patcher for this metric, if any; otherwise no-op.
        # NOTE(review): duplicate parameter names make this signature a SyntaxError.
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](lowercase ):
                yield
        else:
            yield

    @contextmanager
    def _a (self ):
        # Redirect datasets.load_metric to the local ./metrics scripts.
        def load_local_metric(lowercase , *lowercase , **lowercase ):
            return load_metric(os.path.join("""metrics""" , lowercase ) , *lowercase , **lowercase )

        with patch("""datasets.load_metric""" ) as mock_load_metric:
            # NOTE(review): presumably meant mock_load_metric.side_effect = load_local_metric — confirm.
            A_ : Optional[int] = load_local_metric
            yield

    @classmethod
    def _a (cls , lowercase ):
        # Decorator factory: register a context-manager patcher for a metric name.
        def wrapper(lowercase ):
            # NOTE(review): `patcher` is read below but never bound — renaming damage.
            A_ : Tuple = contextmanager(lowercase )
            A_ : List[Any] = patcher
            return patcher

        return wrapper
# NOTE(review): `LocalMetricTest` is not defined under that name in this file
# (the test class above was renamed by an automated pass) — confirm the target.
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def a ( lowerCamelCase__ ):
    """Patcher: stub out bleurt's predictor so no TF model is downloaded or run.

    Fixes: the nested mock class is named ``MockedPredictor`` (the instantiation
    below read that name, which was never defined) and subclasses the imported
    ``Predictor``; the mocked factory's ``return_value`` is actually assigned
    instead of binding a throwaway local.
    """
    import tensorflow.compat.va as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("""sv""" , """""" , """""" )  # handle pytest cli flags

    class MockedPredictor(Predictor):
        # NOTE(review): method name restored to `predict`, the hook bleurt invokes — confirm.
        def predict(self, input_dict):
            assert len(input_dict["""input_ids"""]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
# NOTE(review): `LocalMetricTest` is not defined under that name in this file — confirm the target.
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def a ( lowerCamelCase__ ):
    """Patcher: stub out bert_score's model download and forward pass.

    Fixes: the inner stub's duplicate parameter names (a SyntaxError) are
    replaced with a conventional signature, and the stub is wired up via
    ``side_effect`` instead of binding a throwaway local.
    """
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        # One constant score row per reference — shape matches the real function's output.
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("""bert_score.scorer.get_model""" ), patch(
        """bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
# NOTE(review): `LocalMetricTest` is not defined under that name in this file — confirm the target.
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def a ( lowerCamelCase__ ):
    """Patcher: stub out comet's model download and prediction.

    Fixes: the nested mock class is named ``Model`` (the ``return Model()`` below
    read that name, which was never defined), its scores list is bound to the
    ``scores`` name the return line reads, its duplicate-parameter signature (a
    SyntaxError) is repaired, and both mocks are wired via ``return_value`` /
    ``side_effect`` instead of throwaway locals.
    """
    def load_from_checkpoint(model_path):
        class Model:
            # NOTE(review): method name restored to `predict`, the hook comet invokes — confirm.
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock load_from_checkpoint which is supposed to do download a bert model
    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("""comet.download_model""" ) as mock_download_model:
        mock_download_model.return_value = None
        with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def a ( ):
    """seqeval must raise a ValueError naming the allowed schemes for a bogus scheme.

    Fixes: the bogus scheme and expected message were bound to throwaway names
    while later lines read ``wrong_scheme``; the expected exception type
    (``ValueError``) and the ``scheme=`` argument were lost by renaming.
    """
    metric = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
    wrong_scheme = """ERROR"""
    error_message = f'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __UpperCAmelCase ):
    """Class-conditional image generation pipeline (DiT-style: transformer denoiser + VAE decoder).

    NOTE(review): throughout this class, intermediate results are bound to a
    throwaway name ``A_`` while later lines read descriptive names
    (``latent_model_input``, ``class_labels``, ``timesteps``, ``noise_pred``,
    ``samples``, ...) that are never assigned, and ``__init__`` repeats the
    parameter name ``lowercase`` (a SyntaxError). This looks like damage from
    automated renaming; the code is reproduced unchanged with documentation only.
    """

    def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ):
        # Register the denoising transformer, VAE, and scheduler on the pipeline,
        # and build a label-name -> class-id map from the optional id->label dict.
        super().__init__()
        self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase )
        # create a imagenet -> id dictionary for easier use
        A_ : str = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split(""",""" ):
                    A_ : Optional[Any] = int(lowercase )
            A_ : List[Any] = dict(sorted(self.labels.items() ) )

    def _a (self , lowercase ):
        # Map human-readable label(s) to class ids, validating against self.labels.
        if not isinstance(lowercase , lowercase ):
            A_ : Optional[int] = list(lowercase )
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ):
        # Denoising loop with optional classifier-free guidance (guidance_scale > 1
        # duplicates the latents and appends null-class labels).
        A_ : Tuple = len(lowercase )
        A_ : Optional[Any] = self.transformer.config.sample_size
        A_ : int = self.transformer.config.in_channels
        A_ : Optional[int] = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , )
        A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
        A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 )
        # 1000 is used as the "null" class id for the unconditional half.
        A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
        A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(lowercase )
        for t in self.progress_bar(self.scheduler.timesteps ):
            if guidance_scale > 1:
                # Keep the halves identical so conditional/unconditional share latents.
                A_ : List[Any] = latent_model_input[: len(lowercase ) // 2]
                A_ : List[str] = torch.cat([half, half] , dim=0 )
            A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase )
            A_ : Tuple = t
            if not torch.is_tensor(lowercase ):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                A_ : Optional[Any] = latent_model_input.device.type == """mps"""
                if isinstance(lowercase , lowercase ):
                    A_ : Optional[Any] = torch.floataa if is_mps else torch.floataa
                else:
                    A_ : List[Any] = torch.intaa if is_mps else torch.intaa
                A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device )
            elif len(timesteps.shape ) == 0:
                A_ : List[Any] = timesteps[None].to(latent_model_input.device )
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            A_ : int = timesteps.expand(latent_model_input.shape[0] )
            # predict noise model_output
            A_ : List[Any] = self.transformer(
                lowercase , timestep=lowercase , class_labels=lowercase ).sample
            # perform guidance
            if guidance_scale > 1:
                A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 )
                A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                A_ : str = torch.cat([half_eps, half_eps] , dim=0 )
                A_ : Optional[int] = torch.cat([eps, rest] , dim=1 )
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                A_, A_ : int = torch.split(lowercase , lowercase , dim=1 )
            else:
                A_ : Optional[int] = noise_pred
            # compute previous image: x_t -> x_t-1
            A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
        if guidance_scale > 1:
            A_, A_ : int = latent_model_input.chunk(2 , dim=0 )
        else:
            A_ : Union[str, Any] = latent_model_input
        # Decode latents back to pixel space and rescale from [-1, 1] to [0, 1].
        A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
        A_ : List[Any] = self.vae.decode(lowercase ).sample
        A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            A_ : int = self.numpy_to_pil(lowercase )
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=lowercase )
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = '''▁'''

# Fixes: all of the constants below were collapsed onto one repeatedly-shadowed
# module name, while the tokenizer class below references them by these names
# (class attributes, SPIECE_UNDERLINE in _tokenize, logger in save_vocabulary).
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}

RESOURCE_FILES_NAMES = {
    '''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
    '''vocab_file''': '''vocab.txt''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
        '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
    },
    '''sentencepiece_model_file''': {
        '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
        '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''ernie-m-base''': 514,
    '''ernie-m-large''': 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    '''ernie-m-base''': {'''do_lower_case''': False},
    '''ernie-m-large''': {'''do_lower_case''': False},
}

# Preserve the final value of the obfuscated module binding for compatibility.
lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
class _lowerCAmelCase ( __UpperCAmelCase ):
    """SentencePiece-based ErnieM tokenizer.

    NOTE(review): throughout this class, intermediate results are bound to a
    throwaway name ``A_`` while later lines read descriptive names
    (``normalized_text``, ``char_mapping``, ``split_tokens``, ``new_pieces``,
    ``lst_i``, ``index``, ``vocab_file``, ...) that are never assigned, and
    several signatures repeat the parameter name ``lowercase`` (a SyntaxError).
    This looks like damage from automated renaming; the code is reproduced
    unchanged with documentation only.
    """

    # Model input names and vocab resource descriptors (attribute names mangled).
    __SCREAMING_SNAKE_CASE : List[str] = ["input_ids"]
    __SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
    __SCREAMING_SNAKE_CASE : Dict = PRETRAINED_INIT_CONFIGURATION
    __SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
    __SCREAMING_SNAKE_CASE : Union[str, Any] = RESOURCE_FILES_NAMES

    def __init__(self , lowercase , lowercase=None , lowercase=False , lowercase="utf8" , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase = None , **lowercase , ):
        # Load the SentencePiece model and build vocab / reverse-vocab maps.
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        A_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , vocab_file=lowercase , encoding=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
        A_ : Union[str, Any] = do_lower_case
        A_ : List[str] = sentencepiece_model_ckpt
        A_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowercase )
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            A_ : List[Any] = self.load_vocab(filepath=lowercase )
        else:
            A_ : List[str] = {self.sp_model.id_to_piece(lowercase ): id for id in range(self.sp_model.get_piece_size() )}
        A_ : Union[str, Any] = {v: k for k, v in self.vocab.items()}

    def _a (self , lowercase ):
        # Map each produced token to a (start, end) character span of the input text.
        if text is None:
            return None
        A_ : str = self.tokenize(lowercase )
        A_, A_ : List[Any] = """""", []
        for i, ch in enumerate(lowercase ):
            if ch in self.SP_CHAR_MAPPING:
                A_ : Optional[Any] = self.SP_CHAR_MAPPING.get(lowercase )
            else:
                A_ : Dict = unicodedata.normalize("""NFKC""" , lowercase )
            if self.is_whitespace(lowercase ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(lowercase ) )
        A_, A_, A_ : str = normalized_text, [], 0
        if self.do_lower_case:
            A_ : Optional[int] = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                A_ : Optional[int] = token[1:]
            A_ : List[Any] = text[offset:].index(lowercase ) + offset
            A_ : Any = start + len(lowercase )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            A_ : Optional[int] = end
        return token_mapping

    @property
    def _a (self ):
        # Vocabulary size.
        return len(self.vocab )

    def _a (self ):
        # Full vocabulary including added tokens.
        return dict(self.vocab , **self.added_tokens_encoder )

    def __getstate__(self ):
        # Drop the unpicklable SentencePiece processor before pickling.
        A_ : int = self.__dict__.copy()
        A_ : Optional[int] = None
        return state

    def __setstate__(self , lowercase ):
        # Restore state and re-load the SentencePiece processor.
        A_ : Optional[int] = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            A_ : Union[str, Any] = {}
        A_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )

    def _a (self , lowercase ):
        # Apply the character substitution table to the raw text.
        return "".join((self.SP_CHAR_MAPPING.get(lowercase , lowercase ) for c in text) )

    def _a (self , lowercase , lowercase=False , lowercase=64 , lowercase=0.1 ):
        # Tokenize with SentencePiece (optionally with sampling), then re-split
        # pieces at CJK characters, punctuation, and digit boundaries.
        if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
            A_ : Dict = True
        if self.sp_model_kwargs.get("""alpha""" ) is not None:
            A_ : List[str] = self.sp_model_kwargs.get("""alpha""" )
        if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
            A_ : Optional[Any] = self.sp_model_kwargs.get("""nbest_size""" )
        if not enable_sampling:
            A_ : Any = self.sp_model.EncodeAsPieces(lowercase )
        else:
            A_ : Optional[Any] = self.sp_model.SampleEncodeAsPieces(lowercase , lowercase , lowercase )
        A_ : Optional[int] = []
        for pi, piece in enumerate(lowercase ):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(lowercase ) and pi != 0:
                    new_pieces.append(lowercase )
                    continue
                else:
                    continue
            A_ : List[Any] = 0
            for i, chunk in enumerate(lowercase ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(lowercase ) or self.is_punct(lowercase ):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(lowercase )
                    A_ : Optional[Any] = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    A_ : Union[str, Any] = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    A_ : Optional[int] = i
            if len(lowercase ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces

    def _a (self , lowercase ):
        # Join tokens, turning the SentencePiece boundary marker back into spaces.
        A_ : Tuple = """""".join(lowercase ).replace(lowercase , """ """ ).strip()
        return out_string

    def _a (self , lowercase ):
        # Convert ids to tokens, then join as above.
        A_ : Tuple = self.convert_ids_to_tokens(lowercase )
        A_ : Optional[int] = """""".join(lowercase ).replace(lowercase , """ """ ).strip()
        return out_string

    def _a (self , lowercase ):
        # Token -> id, falling back to the unknown-token id.
        return self.vocab.get(lowercase , self.vocab.get(self.unk_token ) )

    def _a (self , lowercase ):
        # Id -> token, falling back to the unknown token.
        return self.reverse_vocab.get(lowercase , self.unk_token )

    def _a (self , lowercase , lowercase=None ):
        # Build model inputs: [CLS] A [SEP] or [CLS] A [SEP] [SEP] B [SEP].
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        A_ : Dict = [self.cls_token_id]
        A_ : Any = [self.sep_token_id]
        return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep

    def _a (self , lowercase , lowercase=None ):
        # Offset mapping with (0, 0) placeholders for the special tokens.
        if offset_mapping_a is None:
            return [(0, 0)] + offset_mapping_a + [(0, 0)]
        return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]

    def _a (self , lowercase , lowercase=None , lowercase=False ):
        # 1 for special tokens, 0 for sequence tokens.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(lowercase )) + [1, 1] + ([0] * len(lowercase )) + [1]
        return [1] + ([0] * len(lowercase )) + [1]

    def _a (self , lowercase , lowercase = None ):
        # Token-type ids: zeros for the first segment, ones for the second.
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_a is None:
            # [CLS] X [SEP]
            return (len(lowercase ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(lowercase ) + 1) + [1] * (len(lowercase ) + 3)

    def _a (self , lowercase ):
        # True if the character is in the CJK Unified Ideographs range.
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def _a (self , lowercase ):
        # True if the character is an ASCII letter.
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def _a (self , lowercase ):
        # True if the character is ASCII or fullwidth CJK punctuation.
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def _a (self , lowercase ):
        # True for ASCII whitespace or any Unicode space separator (category Zs).
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(lowercase ) == 1:
            A_ : Union[str, Any] = unicodedata.category(lowercase )
            if cat == "Zs":
                return True
        return False

    def _a (self , lowercase ):
        # Read a one-token-per-line vocab file into a token -> index dict.
        A_ : str = {}
        with io.open(lowercase , """r""" , encoding="""utf-8""" ) as f:
            for index, line in enumerate(lowercase ):
                A_ : Optional[int] = line.rstrip("""\n""" )
                A_ : str = int(lowercase )
        return token_to_idx

    def _a (self , lowercase , lowercase = None ):
        # Write the vocab (sorted by index) and the serialized SentencePiece model.
        # NOTE(review): the sort key `lambda lowercase : kv[1]` reads an unbound `kv` — renaming damage.
        A_ : Dict = 0
        if os.path.isdir(lowercase ):
            A_ : List[Any] = os.path.join(
                lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        else:
            A_ : List[Any] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
        with open(lowercase , """w""" , encoding="""utf-8""" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda lowercase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        """ Please check that the vocabulary is not corrupted!""" )
                    A_ : Tuple = token_index
                writer.write(token + """\n""" )
                index += 1
        A_ : str = os.path.join(lowercase , """sentencepiece.bpe.model""" )
        with open(lowercase , """wb""" ) as fi:
            A_ : Dict = self.sp_model.serialized_model_proto()
            fi.write(lowercase )
        return (vocab_file,)
'''simple docstring'''
import math
# Project Euler 493: 70 balls, 10 of each of 7 colours; 20 are drawn at random.
# Fixes: all three constants were collapsed onto one shadowed module name while
# the function below reads them by name, the function body read the unbound
# names `missing_colour`/`total`, and the main guard called an undefined name.
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS

# Preserve the final value of the obfuscated module binding for compatibility.
lowerCamelCase = NUM_BALLS


def a ( lowerCamelCase__ = 20 ):
    """Return the expected number of distinct colours among ``lowerCamelCase__`` drawn balls.

    Uses linearity of expectation: each colour is present unless all drawn balls
    avoid its 10 members. Result is formatted to 9 decimal places.
    """
    total = math.comb(NUM_BALLS, lowerCamelCase__)
    # Ways to draw while completely missing one fixed colour.
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, lowerCamelCase__)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'{result:.9f}'


if __name__ == "__main__":
    print(a(20))
'''simple docstring'''
from math import loga
def a ( lowerCamelCase__ ):
    """Return the 0-based index of the lowest set bit of a non-negative integer
    (0 is mapped to 0).

    Fixes: the type check was ``isinstance(x, x)`` (always a TypeError for ints)
    instead of rejecting non-ints, and the return expression read the function
    name instead of the parameter. The ``loga`` helper imported at the top of
    this file does not exist in ``math`` (renaming damage), so the equivalent
    ``(n & -n).bit_length() - 1`` is used — exact for the power-of-two low bit.
    """
    if lowerCamelCase__ < 0:
        raise ValueError("""Input value must be a positive integer""")
    elif not isinstance(lowerCamelCase__, int):
        raise TypeError("""Input value must be a 'int' type""")
    # (n & -n) isolates the lowest set bit; bit_length()-1 is its index.
    return 0 if lowerCamelCase__ == 0 else (lowerCamelCase__ & -lowerCamelCase__).bit_length() - 1
if __name__ == "__main__":
    # Run the doctest examples embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
    """Configuration for the Pix2Struct text decoder.

    NOTE(review): this block is machine-mangled.  The base class
    `__UpperCAmelCase` is undefined (presumably `PretrainedConfig`), and
    `__init__` declares every parameter as `lowercase` (duplicate argument ->
    SyntaxError) while the body reads the real parameter names
    (`vocab_size`, `hidden_size`, `d_kv`, ...).  Restore the original names
    before use.
    """

    # HF model-type key and standard attribute aliases.
    __SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model'
    __SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values']
    __SCREAMING_SNAKE_CASE : List[Any] = {
        'hidden_size': 'hidden_size',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ):
        # Store decoder hyper-parameters, then defer to the base config.
        A_ : Tuple = vocab_size
        A_ : str = hidden_size
        A_ : Optional[Any] = d_kv
        A_ : Tuple = d_ff
        A_ : str = num_layers
        A_ : int = num_heads
        A_ : Dict = relative_attention_num_buckets
        A_ : Optional[Any] = relative_attention_max_distance
        A_ : Dict = dropout_rate
        A_ : Optional[int] = layer_norm_epsilon
        A_ : Dict = initializer_factor
        A_ : Any = use_cache
        A_ : int = eos_token_id
        A_ : Tuple = decoder_start_token_id
        # for backwards compatibility
        A_ : str = dense_act_fn
        super().__init__(
            pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , )

    @classmethod
    def _a (cls , lowercase , **lowercase ):
        """Build this config from a pretrained checkpoint name or path."""
        cls._set_token_in_kwargs(lowercase )
        A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""" ) == "pix2struct":
            A_ : int = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
    """Configuration for the Pix2Struct vision encoder.

    NOTE(review): machine-mangled — the base class `__UpperCAmelCase` is
    undefined (presumably `PretrainedConfig`) and `__init__` declares every
    parameter as `lowercase` (duplicate argument -> SyntaxError) while the
    body reads the real parameter names.  Restore the original names.
    """

    __SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model'

    def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ):
        super().__init__(**lowercase )
        # Store encoder hyper-parameters on the instance.
        A_ : List[str] = hidden_size
        A_ : Optional[int] = patch_embed_hidden_size
        A_ : Any = d_ff
        A_ : str = dropout_rate
        A_ : Dict = num_hidden_layers
        A_ : Optional[Any] = num_attention_heads
        A_ : List[Any] = initializer_range
        A_ : List[str] = initializer_factor
        A_ : Dict = attention_dropout
        A_ : Optional[Any] = layer_norm_eps
        A_ : Optional[Any] = dense_act_fn
        A_ : List[Any] = seq_len
        A_ : Tuple = relative_attention_num_buckets
        A_ : Any = relative_attention_max_distance
        A_ : int = d_kv

    @classmethod
    def _a (cls , lowercase , **lowercase ):
        """Build this config from a pretrained checkpoint name or path."""
        cls._set_token_in_kwargs(lowercase )
        A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""" ) == "pix2struct":
            A_ : Tuple = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
    """Composite Pix2Struct configuration tying text and vision sub-configs.

    NOTE(review): machine-mangled — undefined base class `__UpperCAmelCase`
    (presumably `PretrainedConfig`), duplicate `lowercase` parameters
    (SyntaxError), and references to undefined classes
    `PixaStructTextConfig` / `PixaStructVisionConfig`; the two sibling config
    classes in this file were presumably those classes before renaming.
    """

    __SCREAMING_SNAKE_CASE : Any = 'pix2struct'
    __SCREAMING_SNAKE_CASE : List[Any] = True

    def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ):
        super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase )
        # Fall back to default sub-configs when none are supplied.
        if text_config is None:
            A_ : Optional[Any] = {}
            logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
        if vision_config is None:
            A_ : Tuple = {}
            logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
        A_ : Tuple = PixaStructTextConfig(**lowercase )
        A_ : List[str] = PixaStructVisionConfig(**lowercase )
        # Mirror a few decoder token ids at the top level.
        A_ : Dict = self.text_config.decoder_start_token_id
        A_ : Union[str, Any] = self.text_config.pad_token_id
        A_ : str = self.text_config.eos_token_id
        A_ : List[str] = initializer_factor
        A_ : int = initializer_range
        A_ : Tuple = self.initializer_range
        A_ : Tuple = self.initializer_range
        A_ : List[str] = is_vqa

    @classmethod
    def _a (cls , lowercase , lowercase , **lowercase ):
        """Build a composite config from existing text and vision configs."""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )

    def _a (self ):
        """Serialize this config (and nested sub-configs) to a plain dict."""
        A_ : Optional[Any] = copy.deepcopy(self.__dict__ )
        A_ : str = self.text_config.to_dict()
        A_ : List[Any] = self.vision_config.to_dict()
        A_ : List[str] = self.__class__.model_type
        return output
"""
A singly linked list that keeps its elements sorted, plus a merge helper,
demonstrated on two small test tuples.
"""
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    """A single linked-list cell holding one value and the next pointer."""

    data: int
    next_node: Node | None


class SortedLinkedList:
    """Singly linked list whose nodes are kept in ascending order."""

    def __init__(self, ints: Iterable[int]) -> None:
        """Build the list from *ints*, storing them in ascending order."""
        self.head: Node | None = None
        # Insert in descending order so prepending yields an ascending list.
        for value in sorted(ints, reverse=True):
            self.head = Node(value, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the stored values from smallest to largest."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        """Return the number of stored values."""
        return sum(1 for _ in self)

    def __str__(self) -> str:
        """Render the list as 'a -> b -> c'."""
        return " -> ".join([str(item) for item in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Return a new SortedLinkedList with every element of both inputs."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
"""Lazy import structure for the Audio Spectrogram Transformer (AST) model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available

# Configuration objects are always importable; model and feature-extractor
# entries are appended below only when their backends are installed.
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Lazy import structure for the LongT5 model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available

# Configuration objects are always importable; PyTorch and Flax entries are
# appended below only when the corresponding backend is installed.
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
    """Processor combining a LayoutLMv3 image processor with its tokenizer.

    NOTE(review): identifiers are machine-mangled — the base class
    `__UpperCAmelCase` is undefined (presumably `ProcessorMixin`) and
    `__init__` / `__call__` / `_a` declare duplicate `lowercase` parameters
    (a SyntaxError) while the bodies read the real names (`image_processor`,
    `tokenizer`, `text`, `boxes`, `word_labels`, ...).  Restore the original
    parameter names before use.
    """

    __SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer']
    __SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor'
    __SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')

    def __init__(self , lowercase=None , lowercase=None , **lowercase ):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`.
        A_ : Dict = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , lowercase , )
            A_ : List[str] = kwargs.pop("""feature_extractor""" )
        A_ : List[str] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(lowercase , lowercase )

    def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
        """Run the image processor (optionally with OCR), then tokenize."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
        # first, apply the image processor
        A_ : Optional[int] = self.image_processor(images=lowercase , return_tensors=lowercase )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(lowercase , lowercase ):
                A_ : Union[str, Any] = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            A_ : Dict = features["""words"""]
        A_ : Optional[int] = self.tokenizer(
            text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
        # add pixel values
        A_ : List[Any] = features.pop("""pixel_values""" )
        if return_overflowing_tokens is True:
            # Duplicate images so each overflow chunk keeps its source image.
            A_ : List[str] = self.get_overflowing_images(lowercase , encoded_inputs["""overflow_to_sample_mapping"""] )
        A_ : Optional[int] = images
        return encoded_inputs

    def _a (self , lowercase , lowercase ):
        """Map each overflowed chunk back to its originating image."""
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        A_ : str = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(lowercase ) != len(lowercase ):
            raise ValueError(
                """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
                F' {len(lowercase )} and {len(lowercase )}' )
        return images_with_overflow

    def _a (self , *lowercase , **lowercase ):
        # Delegate batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*lowercase , **lowercase )

    def _a (self , *lowercase , **lowercase ):
        # Delegate single-sequence decoding to the tokenizer.
        return self.tokenizer.decode(*lowercase , **lowercase )

    @property
    def _a (self ):
        # Names of the model inputs this processor produces.
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def _a (self ):
        # Deprecated alias kept for backwards compatibility.
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , )
        return self.image_processor_class

    @property
    def _a (self ):
        # Deprecated alias kept for backwards compatibility.
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , )
        return self.image_processor
"""Floyd–Warshall all-pairs shortest paths with simple console I/O."""


def _print_dist(dist, v):
    """Pretty-print the distance matrix, using INF for unreachable pairs."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """Return ``(dist, v)`` where ``dist[i][j]`` is the shortest distance i->j.

    Args:
        graph: a ``v`` x ``v`` adjacency matrix with ``float("inf")`` marking
            missing edges (diagonal entries should be 0).
        v: number of vertices.

    The matrix is also printed.  Runs in O(v^3) time, O(v^2) space.
    """
    # Start from a copy of the adjacency matrix.
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
    """Automatic mask-generation pipeline (SAM-style point-grid prompting).

    NOTE(review): identifiers are machine-mangled — the decorator argument
    and base class `__UpperCAmelCase` are undefined (presumably
    `PIPELINE_INIT_ARGS` / `ChunkPipeline`), and several methods declare
    duplicate `lowercase` parameters (a SyntaxError) while their bodies read
    the real names (`kwargs`, `points_per_batch`, ...).  Restore the
    original names before use.
    """

    def __init__(self , **lowercase ):
        super().__init__(**lowercase )
        # Mask generation needs both the vision stack and PyTorch.
        requires_backends(self , """vision""" )
        requires_backends(self , """torch""" )
        if self.framework != "pt":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
        self.check_model_type(lowercase )

    def _a (self , **lowercase ):
        """Split **kwargs into preprocess / forward / postprocess dicts."""
        A_ : str = {}
        A_ : Dict = {}
        A_ : str = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            A_ : Dict = kwargs["""points_per_batch"""]
        if "points_per_crop" in kwargs:
            A_ : int = kwargs["""points_per_crop"""]
        if "crops_n_layers" in kwargs:
            A_ : str = kwargs["""crops_n_layers"""]
        if "crop_overlap_ratio" in kwargs:
            A_ : int = kwargs["""crop_overlap_ratio"""]
        if "crop_n_points_downscale_factor" in kwargs:
            A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            A_ : Any = kwargs["""pred_iou_thresh"""]
        if "stability_score_offset" in kwargs:
            A_ : Optional[int] = kwargs["""stability_score_offset"""]
        if "mask_threshold" in kwargs:
            A_ : Union[str, Any] = kwargs["""mask_threshold"""]
        if "stability_score_thresh" in kwargs:
            A_ : List[str] = kwargs["""stability_score_thresh"""]
        if "crops_nms_thresh" in kwargs:
            A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""]
        if "output_rle_mask" in kwargs:
            A_ : List[Any] = kwargs["""output_rle_mask"""]
        if "output_bboxes_mask" in kwargs:
            A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ):
        # Delegate to the chunked-pipeline machinery of the base class.
        return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase )

    def _a (self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ):
        """Preprocess: embed the image once, then yield point batches."""
        A_ : Tuple = load_image(lowercase )
        A_ : int = self.image_processor.size["""longest_edge"""]
        A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes(
            lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
        A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" )
        with self.device_placement():
            if self.framework == "pt":
                A_ : Optional[Any] = self.get_inference_context()
                with inference_context():
                    A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device )
                    # Compute image embeddings once and reuse for every batch.
                    A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
                    A_ : Tuple = image_embeddings
        A_ : Dict = grid_points.shape[1]
        A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
                """To return all points at once, set points_per_batch to None""" )
        # Yield one chunk of prompt points at a time.
        for i in range(0 , lowercase , lowercase ):
            A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :]
            A_ : List[Any] = input_labels[:, i : i + points_per_batch]
            A_ : Optional[Any] = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _a (self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ):
        """Forward: run the model on one point batch and filter its masks."""
        A_ : Any = model_inputs.pop("""input_boxes""" )
        A_ : str = model_inputs.pop("""is_last""" )
        A_ : int = model_inputs.pop("""original_sizes""" ).tolist()
        A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
        A_ : List[str] = self.model(**lowercase )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        A_ : Optional[int] = model_outputs["""pred_masks"""]
        A_ : Tuple = self.image_processor.post_process_masks(
            lowercase , lowercase , lowercase , lowercase , binarize=lowercase )
        A_ : Union[str, Any] = model_outputs["""iou_scores"""]
        A_, A_, A_ : Tuple = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def _a (self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ):
        """Postprocess: concatenate batch outputs and apply NMS across crops."""
        A_ : Tuple = []
        A_ : Optional[Any] = []
        A_ : str = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("""iou_scores""" ) )
            all_masks.extend(model_output.pop("""masks""" ) )
            all_boxes.append(model_output.pop("""boxes""" ) )
        A_ : Any = torch.cat(lowercase )
        A_ : List[Any] = torch.cat(lowercase )
        A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation(
            lowercase , lowercase , lowercase , lowercase )
        # Collect any remaining per-batch extras into lists keyed by name.
        A_ : int = defaultdict(lowercase )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(lowercase )
        A_ : Optional[int] = {}
        if output_rle_mask:
            A_ : List[str] = rle_mask
        if output_bboxes_mask:
            A_ : Optional[int] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class _lowerCAmelCase ( __UpperCAmelCase ):
    """Configuration class for Data2Vec-Audio models.

    NOTE(review): machine-mangled — the base class `__UpperCAmelCase` is
    undefined (presumably `PretrainedConfig`) and `__init__` declares every
    parameter as `lowercase` (duplicate argument -> SyntaxError) while the
    body reads the real parameter names (`vocab_size`, `hidden_size`, ...).
    Restore the original names before use.
    """

    __SCREAMING_SNAKE_CASE : Union[str, Any] = 'data2vec-audio'

    def __init__(self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=16 , lowercase=19 , lowercase=5 , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
        super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
        # Feature-extractor (convolutional front-end) hyper-parameters.
        A_ : Optional[int] = hidden_size
        A_ : Any = feat_extract_activation
        A_ : List[str] = list(lowercase )
        A_ : List[Any] = list(lowercase )
        A_ : Any = list(lowercase )
        A_ : int = conv_bias
        A_ : Tuple = num_conv_pos_embeddings
        A_ : Dict = num_conv_pos_embedding_groups
        A_ : Any = conv_pos_kernel_size
        A_ : str = len(self.conv_dim )
        # Transformer encoder hyper-parameters.
        A_ : int = num_hidden_layers
        A_ : Optional[Any] = intermediate_size
        A_ : List[Any] = hidden_act
        A_ : int = num_attention_heads
        A_ : List[str] = hidden_dropout
        A_ : Tuple = attention_dropout
        A_ : Optional[int] = activation_dropout
        A_ : Any = feat_proj_dropout
        A_ : Any = final_dropout
        A_ : Any = layerdrop
        A_ : List[str] = layer_norm_eps
        A_ : int = initializer_range
        A_ : Tuple = vocab_size
        A_ : List[str] = use_weighted_layer_sum
        # The three conv descriptions must agree on the number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
                F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        A_ : Union[str, Any] = mask_time_prob
        A_ : int = mask_time_length
        A_ : Dict = mask_time_min_masks
        A_ : Tuple = mask_feature_prob
        A_ : Tuple = mask_feature_length
        A_ : List[str] = mask_feature_min_masks
        # ctc loss
        A_ : Optional[int] = ctc_loss_reduction
        A_ : Union[str, Any] = ctc_zero_infinity
        # adapter
        A_ : Tuple = add_adapter
        A_ : Optional[int] = adapter_kernel_size
        A_ : Optional[Any] = adapter_stride
        A_ : Any = num_adapter_layers
        A_ : Tuple = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        A_ : int = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        A_ : Tuple = list(lowercase )
        A_ : Dict = list(lowercase )
        A_ : Optional[Any] = list(lowercase )
        A_ : Optional[int] = xvector_output_dim

    @property
    def _a (self ):
        # Total downsampling stride of the convolutional feature extractor.
        return math.prod(self.conv_stride )
"""Euler's modified method (Heun's method) for solving an ODE y' = f(x, y)."""
from collections.abc import Callable

import numpy as np


def a(
    ode_func: Callable,
    y0: float,
    x0: float,
    step_size: float,
    x_end: float,
) -> np.ndarray:
    """Solve ``y' = ode_func(x, y)`` with ``y(x0) = y0`` on ``[x0, x_end]``.

    Uses the explicit trapezoidal rule: a forward-Euler predictor followed
    by a trapezoid corrector (second-order accurate).

    Args:
        ode_func: right-hand side ``f(x, y)`` of the ODE.
        y0: initial value at ``x0``.
        x0: left end of the integration interval.
        step_size: step width ``h``.
        x_end: right end of the integration interval.

    Returns:
        Array of the approximated ``y`` values at each grid point.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor: plain forward Euler step.
        y_pred = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the interval.
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_pred)
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""Compress a binary file with a simple LZW-style bit-string coder.

Usage: python this_script.py <source_file> <destination_file>
"""
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Read *file_path* and return its contents as a string of '0'/'1' bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Replace the matched key by its two one-bit extensions.

    When *index* reaches a power of two, every existing code gains a leading
    zero so that all codes keep equal length.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the bit string *data_bits* using a growing lexicon."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    # Flush any trailing partial match by zero-padding until a code matches.
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prefix *compressed* with the source file length (unary-length header)."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit string *to_write* to *file_path*, padded to whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            # Append a '1' end marker plus zero padding so a decoder can
            # locate the true end of the data.
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Read, compress and write *source_path* into *destination_path*."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
"""Convert original TensorFlow MobileNetV1 checkpoints to the HF format."""
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetVaConfig,
    MobileNetVaForImageClassification,
    MobileNetVaImageProcessor,
    load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_va_config(model_name):
    """Build a MobileNetV1 config from a name like 'mobilenet_v1_1.0_224'."""
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 10_01
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k) + 1: v for k, v in idalabel.items()}
    idalabel[0] = "background"
    # NOTE(review): attribute names follow this codebase's digit-mangled
    # convention (idalabel/labelaid) — confirm against MobileNetVaConfig.
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config


def prepare_img():
    """Download the COCO test image (two cats) used to sanity-check logits."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy TF weights into a fresh HF model, verify logits, and save it."""
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    # NOTE(review): argument order reconstructed — confirm against the
    # `load_tf_weights_in_mobilenet_va` signature.
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 10_01)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_739, -1.1_233, 3.1_205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_440, -2.3_141, -0.3_333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
'''simple docstring'''
def a(point_a, point_b):
    """Return the Manhattan (taxicab) distance between two n-dimensional points.

    Each argument must be a non-empty, equal-length list of numbers; both are
    checked via the module-level ``_validate_point`` before computing.

    Raises:
        TypeError: if either argument is not a list of numbers.
        ValueError: if either point is empty, or the dimensions differ.

    >>> a([1, 1], [2, 2])
    2.0
    """
    # NOTE: the original definition used the same name for both parameters,
    # which is a SyntaxError; distinct names restore the intended behavior.
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    # Sum of per-coordinate absolute differences, returned as float.
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
def _validate_point(point):
    """Validate that ``point`` is a non-empty list of numbers.

    The distance functions in this module call this helper by the name
    ``_validate_point``, which the original (garbled) source never defined;
    restoring the name fixes that NameError. The garbled
    ``isinstance(point, point)`` is also restored to the intended
    ``isinstance(point, list)`` / ``isinstance(item, (int, float))`` checks.

    Raises:
        ValueError: if ``point`` is empty/falsy ("Missing an input").
        TypeError: if ``point`` is not a list, or contains a non-number.
    """
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


# The original module bound this validator to the public name ``a`` (it was
# immediately shadowed by the next definition); keep that binding for
# backward compatibility.
a = _validate_point
def a(point_a, point_b):
    """Return the Manhattan distance between two n-dimensional points.

    Second implementation sharing the name ``a`` (it shadows the earlier
    definition, exactly as in the original module). Validates both points,
    requires equal dimensionality, then sums absolute coordinate differences.

    Raises:
        TypeError: if either argument is not a list of numbers.
        ValueError: if either point is empty, or the dimensions differ.

    >>> a([1, 1], [2, 2])
    2.0
    """
    # NOTE: the original used duplicate parameter names (a SyntaxError);
    # distinct names restore the intended two-argument signature.
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
    # Run any doctest examples embedded in this module when executed directly.
    import doctest
    doctest.testmod()
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase(ProcessorMixin):
    r"""Processor for the Bark text-to-speech model.

    Wraps an ``AutoTokenizer`` together with an optional dictionary of
    speaker embeddings ("voice presets"), so that text plus a voice preset
    can be prepared for the model in a single ``__call__``.

    NOTE(review): the original (garbled) source was not valid Python — every
    method used duplicate ``lowercase`` parameter names, four methods shared
    the name ``_a``, the three class attributes shared one name, and the base
    class ``__UpperCAmelCase`` was undefined. The names below are restored to
    the ones the code itself references (``self._load_voice_preset``,
    ``self._validate_voice_preset_dict``, ``self.preset_shape``) and to the
    ``ProcessorMixin`` contract (``from_pretrained`` / ``save_pretrained``),
    using the ``ProcessorMixin`` import already present at the top of the file.
    """

    # ProcessorMixin configuration: tokenizer class to instantiate and the
    # attributes that make up this processor.
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    # Expected ndarray rank (number of dimensions) of each voice-preset part,
    # used by _validate_voice_preset_dict.
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        """Store the tokenizer and an optional speaker-embeddings dict."""
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        """Instantiate the processor from a hub repo or local directory.

        ``speaker_embeddings_dict_path`` is the repo-relative JSON file mapping
        voice-preset names to embedding file paths; pass ``None`` to skip
        loading speaker embeddings entirely. Remaining ``kwargs`` are forwarded
        to ``AutoTokenizer.from_pretrained`` after the hub-download options
        are popped off.
        """
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                # Missing embeddings file is non-fatal: warn and continue
                # without preloaded speaker embeddings.
                logger.warning(
                    f'`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.'
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub=False,
        **kwargs,
    ):
        """Save the processor (and any speaker embeddings) to ``save_directory``.

        Each preset's arrays are materialized via ``_load_voice_preset`` and
        written as ``.npy`` files under ``speaker_embeddings_directory``; the
        path index is written to ``speaker_embeddings_dict_path`` as JSON.
        """
        if self.speaker_embeddings is not None:
            # Preset keys look like "v2/<name>", hence the pre-created subdir.
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)
            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f'{prompt_key}_{key}'
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f'{prompt_key}_{key}.npy')
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)
        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset=None, **kwargs):
        """Resolve and load the three ``.npy`` arrays of a named voice preset.

        Looks up the preset's file paths in ``self.speaker_embeddings``,
        fetches each file (hub or local) and returns a dict of ndarrays.

        Raises:
            ValueError: if a required prompt key or its file is missing.
        """
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].'
                )
            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.'
                )
            voice_preset_dict[key] = np.load(path)
        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset=None):
        """Check a voice-preset dict has all prompt keys as ndarrays of the
        rank declared in ``self.preset_shape``; raise ValueError otherwise."""
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f'Voice preset unrecognized, missing {key} as a key.')
            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.')
            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.')

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        """Tokenize ``text`` and attach the resolved voice preset.

        ``voice_preset`` may be a dict of arrays, a known preset name from
        ``self.speaker_embeddings``, or a path to an ``.npz`` file. The
        validated preset is wrapped in a ``BatchFeature`` and returned under
        the ``"history_prompt"`` key of the tokenizer output.
        """
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                # Treat the value as a filesystem path to a saved .npz preset.
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)
        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )
        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset
        return encoded_text
# NOTE(review): the lines below are non-code residue (dataset-site
# boilerplate) accidentally appended to this Python file; commented out so
# the module stays importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.