| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""simple docstring"""
import math
def _snake_case ( _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
if initial_intensity < 0:
raise ValueError('The value of intensity cannot be negative' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_60:
raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(_snake_case ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
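    # Minimal usage sketch (illustration; the demo values are not from the original
    # file). cos(60 deg) = 0.5, so a quarter of the intensity is transmitted:
    print(malus_law(100.0, 60.0))  # ~25.0
    print(malus_law(100.0, 0.0))   # 100.0 -- an aligned polarizer transmits fully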
| code_codestyle: 7 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after the `shortest_edge` resize rule."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
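# Worked example (illustration, not in the original test): with
# size = {"shortest_edge": 18, "longest_edge": 1333} and a PIL image of size
# (w=300, h=400), w < h gives expected_height = int(18 * 400 / 300) = 24 and
# expected_width = 18, i.e. the short side is resized to exactly 18 pixels.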
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| style_context_codestyle: 17 | label: 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Collect (HF name, original name) pairs for the patch-embedding weights of stage `idx`."""
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
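# Illustration (not in the original file): embeddings(0) returns pairs such as
#     ("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
#      "stage0.patch_embed.proj.weight"),
# i.e. (HF parameter name, original checkpoint name), consumed later when copying weights.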
def attention(idx, cnt):
    """Collect (HF name, original name) pairs for attention block `cnt` of stage `idx`."""
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    """Collect the (HF name, original name) pair for the classification token of stage `idx`."""
    token = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") )
return token
def final():
    """Collect (HF name, original name) pairs for the final layernorm and classifier head."""
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
    parser.add_argument(
        """--cvt_file_name""",
        default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
        type=str,
        help="""Path to the original CvT checkpoint file.""",
    )
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
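# Hypothetical invocation (the script name and paths are placeholders, not from this file):
#     python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#         --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#         --pytorch_dump_folder_path ./cvt-w24-384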
| code_codestyle: 14 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
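# Illustration (not in the original file) of how the "*" wildcard in MAPPING is used
# below: a fairseq key such as "encoder.layers.3.self_attn.linear_k.weight" matches
# the "self_attn.linear_k" entry; the layer index "3" is pulled out of the name and
# substituted for "*", giving the HF key
# "wav2vec2_conformer.encoder.layers.3.self_attn.linear_k.weight".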
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| style_context_codestyle: 14 | label: 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
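# Usage sketch (illustration, not part of the module): with the lazy structure above,
# importing the package is cheap and a heavy submodule is only loaded on first access:
#     from transformers import WhisperConfig   # resolves via _import_structure
# `_LazyModule` looks attribute names up in `_import_structure`, so the torch/tf/flax
# backends are imported only when installed and actually used.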
| code_codestyle: 105 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(to_plot_x, to_plot_y, color='blue', label='Curve of Degree ' + str(self.degree))
        plt.scatter(x, y, color='red', label='Control Points')
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
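    # Worked check (illustration, not part of the original demo): for the degree-1
    # curve through (1, 2) and (3, 5), the Bernstein basis at t = 0.5 is [0.5, 0.5],
    # so BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) returns (2.0, 3.5).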
| style_context_codestyle: 105 | label: 1 |
'''A 2nd-order (DPM-Solver-2 style) discrete scheduler in the diffusers KDPM2 family.'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
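# Minimal usage sketch (illustration; mirrors how other diffusers schedulers call this):
#     betas = betas_for_alpha_bar(1000)                    # 1000-step cosine schedule
#     alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)   # decays smoothly towards 0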
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )
        if str(device).startswith('mps'):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError('prediction_type not implemented yet: sample')
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| code_codestyle: 702 |
'''Pinned dependency versions, mapping package name to pip requirement string.'''
deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
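# Sketch of how such a pin table is typically consumed in setup.py (an assumption,
# not shown in this file): requirement strings are always resolved through `deps`,
# e.g. install_requires = [deps["numpy"], deps["Pillow"], deps["requests"]],
# so each version pin is defined exactly once.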
| style_context_codestyle: 44 | label: 0 |
from math import log2


def lowest_set_bit_position(number: int) -> int:
    """Return the zero-based position of the lowest set bit of `number`."""
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
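    # Worked example (illustration; uses the name reconstructed above): 36 is 0b100100,
    # so 36 & -36 == 4 (0b100) and int(log2(4)) == 2, i.e. the lowest set bit sits at
    # zero-based position 2.
    print(lowest_set_bit_position(36))  # 2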
| code_codestyle: 147 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f'''Save vocabulary to {pytorch_vocab_dump_path}''')
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f'''Save dataset to {pytorch_dataset_dump_path}''')
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''')
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f'''Building PyTorch model from configuration: {config}''')
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}''')
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f'''Save configuration file to {os.path.abspath(pytorch_config_dump_path)}''')
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| style_context_codestyle: 147 | label: 1 |
'''Convert CLAP checkpoints from the original LAION repository to the Hugging Face format.'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
processor = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f'sequential.{sequential_layer}.', f'layers.{int(sequential_layer)//3}.linear.')
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f'_projection.{projecton_layer}.', f'_projection.linear{transformers_projection_layer}.')
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
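# Illustration (hypothetical shapes, not from the original file): for an audio attention
# block with hidden size 96, the fused "qkv" weight has shape (288, 96); qkv_dim is
# 288 // 3 = 96, and the three row slices above become the separate query/key/value
# projection weights expected by the Hugging Face Clap implementation.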
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
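# Example invocation (file names are placeholders):
#   python convert_clap_checkpoint.py --checkpoint_path /tmp/clap.pt \
#       --pytorch_dump_folder_path /tmp/clap-hf --enable_fusion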
| 713
|
'''simple docstring'''
def equation(x: float) -> float:
    """The function whose root we search for: f(x) = 10 - x^2."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Find a root of `equation` in [a, b] by repeated interval halving."""
    # Bisection requires a sign change over [a, b].
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
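    # Both calls bracket the positive root of 10 - x*x, so each prints roughly 3.16 (sqrt(10)).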
| 11
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """A single node of a circular singly linked list."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next: Node | None = None


class CircularLinkedList:
    """A circular singly linked list with O(1) access to head and tail."""

    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """Exercise the list operations end to end."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
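    # Minimal demo beyond the doctests: build the list 1->2->3 and print it.
    demo = CircularLinkedList()
    for value in (1, 2, 3):
        demo.insert_tail(value)
    print(demo)  # 1->2->3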
| 232
|
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """Collect (row_id, row_dict) pairs in the order the partitions would be read."""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
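# The helper above fixes the exact order the iterable should yield rows in, e.g.
# partition_order=[1, 0] produces ids "1_0", "1_1", ... before any "0_*" ids.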
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 232
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
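# Each optional backend is probed below; when available, its symbols are appended to
# `_import_structure` so that `_LazyModule` can defer the heavy imports until first use.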
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase_ = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65
| 0
|
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def electric_conductivity(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
    """Solve for whichever of the three quantities is passed in as zero."""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
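    # Example (hypothetical values): pass exactly one argument as zero to solve
    # for it, here the mobility given conductivity and electron concentration.
    print(electric_conductivity(conductivity=25.0, electron_conc=1e20, mobility=0))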
| 62
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase : List[str] = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """Pull the embedded user JSON out of an inline <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and return the parsed user data."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self ):
"""simple docstring"""
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__( self ):
"""simple docstring"""
return f'{self.fullname} ({self.username}) is {self.biography}'
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def UpperCAmelCase_ ( snake_case__ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
lowerCAmelCase__ = InstagramUser(snake_case__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , snake_case__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[int] = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 193
| 0
|
def is_even(number: int) -> bool:
    """Return True when the lowest bit of `number` is zero."""
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
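    # Quick bitwise sanity checks: the lowest bit of an even number is always 0.
    assert is_even(0) and is_even(2) and not is_even(7)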
| 444
|
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask)
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"])

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks)
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged")
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ])
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 444
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id)
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 136
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
    required_optional_params = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005)
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="").to_tuple()
        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 487
| 0
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 26
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26
| 1
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Turn `--key value` pairs that argparse didn't recognize into a kwargs dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False)
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
set_verbosity_info()
# Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
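# Example session (assuming the `datasets-cli` entry point is installed):
#   datasets-cli env                          # print environment info for bug reports
#   datasets-cli test ./my_dataset --save_info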
| 384
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed.")
class BartOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ])
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ))
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)
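# Hedged sketch (editor addition, illustrative only): when a dynamic axis (-1) is
# requested, compute_effective_axis_dimension substitutes a small fixed size so the
# ONNX exporter cannot constant-fold the axis; concrete sizes pass through unchanged.
def _demo_effective_axis_dimension():  # hypothetical helper, safe to delete
    assert compute_effective_axis_dimension(-1, fixed_dimension=2, num_token_to_add=0) == 2
    assert compute_effective_axis_dimension(4, fixed_dimension=2, num_token_to_add=0) == 4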
| 384
| 1
|
import collections
import gzip
import os
import urllib.request
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, 'Please use tf.one_hot on tensors.')
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
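# Hedged sketch (editor addition, illustrative only): the flat-index trick above
# maps class indices straight into a one-hot matrix, e.g. [1, 0] with 3 classes
# becomes [[0, 1, 0], [1, 0, 0]].
def _demo_dense_to_one_hot():  # hypothetical helper, safe to delete
    out = _dense_to_one_hot(numpy.array([1, 0]), 3)
    assert out.tolist() == [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]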
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None, 'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.')
    def __init__(self, images, labels, fake_data=False, one_hot=False,
                 dtype=dtypes.float32, reshape=True, seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, 'Please write your own downloading logic.')
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print('Successfully downloaded', filename, size, 'bytes.')
    return filepath
@deprecated(
    None, 'Please use alternatives such as:' " tensorflow_datasets.load('mnist')")
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32,
                   reshape=True, validation_size=5000, seed=None,
                   source_url=DEFAULT_SOURCE_URL):
    if fake_data:
        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = 'train-images-idx3-ubyte.gz'
    train_labels_file = 'train-labels-idx1-ubyte.gz'
    test_images_file = 't10k-images-idx3-ubyte.gz'
    test_labels_file = 't10k-labels-idx1-ubyte.gz'
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, 'rb') as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, 'rb') as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            'Validation size should be between 0 and '
            f'{len(train_images)}. Received: {validation_size}.'
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
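# Hedged usage sketch (editor addition, illustrative only; path and helper name are
# hypothetical): the fake_data path exercises the _Datasets plumbing without
# downloading MNIST, and next_batch then yields fixed 784-element fake images.
def _demo_fake_mnist():  # hypothetical helper, safe to delete
    data = read_data_sets('/tmp/mnist-demo', fake_data=True, one_hot=True)
    images, labels = data.train.next_batch(2, fake_data=True)
    assert len(images) == 2 and len(images[0]) == 784
    return labels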
| 703
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
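# Hedged sketch (editor addition, illustrative only): after the sys.modules swap
# above, attribute lookups route through _LazyModule.__getattr__, which imports the
# owning submodule on first access instead of at package import time.
def _demo_lazy_attribute_access():  # hypothetical helper, safe to delete
    import sys
    pkg = sys.modules[__name__]
    # The real import of .configuration_time_series_transformer happens only now.
    return pkg.TimeSeriesTransformerConfig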
| 577
| 0
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True,
                 use_input_mask=True, use_token_type_ids=False, use_labels=True,
                 vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
                 intermediate_size=64, hidden_act='gelu', hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
                 num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2,
                 v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size, vocab_size=self.vocab_size,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range, q_groups=self.q_groups,
            k_groups=self.k_groups, v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups, output_groups=self.output_groups)
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': SqueezeBertModel,
            'fill-mask': SqueezeBertForMaskedLM,
            'question-answering': SqueezeBertForQuestionAnswering,
            'text-classification': SqueezeBertForSequenceClassification,
            'token-classification': SqueezeBertForTokenClassification,
            'zero-shot': SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
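# Hedged usage sketch (editor addition, illustrative only; the exact MNLI label
# order of this checkpoint is an assumption): a minimal inference pass with the
# classification head exercised by the integration test above.
def _demo_squeezebert_mnli():  # hypothetical helper, safe to delete
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('squeezebert/squeezebert-mnli')
    model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')
    inputs = tokenizer('A soccer game is happening.', 'Some people are playing a sport.', return_tensors='pt')
    logits = model(**inputs).logits  # shape (1, 3), one score per MNLI class
    return logits.argmax(-1).item()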
| 570
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
        'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/esm2_t6_8M_UR50D': 1024,
    'facebook/esm2_t12_35M_UR50D': 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, 'r') as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, unk_token='<unk>', cls_token='<cls>', pad_token='<pad>',
                 mask_token='<mask>', eos_token='<eos>', **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None,
        already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask
    def save_vocabulary(self, save_directory, filename_prefix=None):
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
        with open(vocab_file, 'w') as f:
            f.write('\n'.join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
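# Hedged usage sketch (editor addition, illustrative only; the tiny vocab and the
# /tmp path are assumptions): every vocab token is a no-split token, so a protein
# string is split per residue and framed as <cls> ... <eos>.
def _demo_esm_tokenizer(vocab_path='/tmp/esm_demo_vocab.txt'):  # hypothetical path
    with open(vocab_path, 'w') as f:
        f.write('\n'.join(['<cls>', '<pad>', '<eos>', '<unk>', 'A', 'G', 'L', '<mask>']))
    tok = EsmTokenizer(vocab_path)
    # Expected ids with this vocab ordering: [<cls>=0, G=5, A=4, L=6, <eos>=2]
    return tok('GAL')['input_ids']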
| 570
| 1
|
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True,
                     data_file="data/tokenized_stories_train_wikitext103.jbl",
                     igf_data_file="igf_context_pairs.jbl"):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, size_objective_set, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(secondary_learner_train_data, secondary_learner_max_epochs=15,
                               secondary_learner_batch_size=128, eval_freq=100,
                               igf_model_path="igf_model.pt"):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path)
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16,
             threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10,
             finetuned_model_name="gpt2_finetuned.pt"):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0))[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain data files for WikiText.")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models")
    parser.add_argument(
        "--data_file", type=str, default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ))
    parser.add_argument(
        "--igf_data_file", type=str, default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.")
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the final fine-tuned model is stored.")
    parser.add_argument(
        "--tokenizer_name", default=None, type=str,
        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ))
    parser.add_argument(
        "--size_objective_set", default=100, type=int,
        help="number of articles that are long enough to be used as our objective set")
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int,
        help="batch size of training data for secondary learner")
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval", default=10, type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ))
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ))
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration")
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl")
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128,
        eval_freq=100, igf_model_path="igf_model.pt")
    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True)
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0,
        recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt")
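# Hedged sketch (editor addition, illustrative only): the core IGF decision from the
# loop in finetune(), isolated. After 10 global steps the threshold decays to -1, so
# almost every context passes the filter.
def _demo_igf_filter(predicted_q, global_step, threshold=1.0):  # hypothetical helper
    if global_step == 10:
        threshold = -1
    return predicted_q >= threshold  # True -> backprop on this context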
if __name__ == "__main__":
main()
| 528
|
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version MAJOR.MINOR.PATCH."""
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Version tuple to str."""
    return ".".join(str(v) for v in version_tuple)
| 528
| 1
|
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny', 'prajjwal1/bert-tiny')
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='train[:1%]')
        val_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='validation[:1%]')
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['article'], padding='max_length', truncation=True, max_length=512)
            outputs = tokenizer(batch['highlights'], padding='max_length', truncation=True, max_length=128)
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask
            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            batch['labels'] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            batch['decoder_attention_mask'] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {'accuracy': accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size,
            remove_columns=['article', 'highlights'])
        train_dataset.set_format(
            type='torch', columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'])
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size,
            remove_columns=['article', 'highlights'])
        val_dataset.set_format(
            type='torch', columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'])
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size, predict_with_generate=True,
            evaluation_strategy='steps', do_train=True, do_eval=True,
            warmup_steps=0, eval_steps=2, logging_steps=2)
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics,
            train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer)
        # start training
        trainer.train()
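# Hedged sketch (editor addition, illustrative only): the -100 convention used in
# _map_to_encoder_decoder_inputs makes CrossEntropyLoss skip padded label positions.
def _demo_label_masking(pad_token_id=0):  # hypothetical helper, safe to delete
    labels = [5, 7, pad_token_id, pad_token_id]
    masked = [-100 if token == pad_token_id else token for token in labels]
    assert masked == [5, 7, -100, -100]
    return masked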
| 13
|
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    group = parser.add_argument_group('quant_trainer arguments')
    group.add_argument('--wprec', type=int, default=8, help='weight precision')
    group.add_argument('--aprec', type=int, default=8, help='activation precision')
    group.add_argument('--quant-per-tensor', action='store_true', help='per tensor weight scaling')
    group.add_argument('--quant-disable', action='store_true', help='disable all quantizers')
    group.add_argument('--quant-disable-embeddings', action='store_true', help='disable all embeddings quantizers')
    group.add_argument('--quant-disable-keyword', type=str, nargs='+', help='disable quantizers by keyword')
    group.add_argument('--quant-disable-layer-module', type=str, help='disable quantizers by keyword under layer.')
    group.add_argument('--quant-enable-layer-module', type=str, help='enable quantizers by keyword under layer')
    group.add_argument('--calibrator', default='max', help='which quantization range calibrator to use')
    group.add_argument('--percentile', default=None, type=float, help='percentile for PercentileCalibrator')
    group.add_argument('--fuse-qkv', action='store_true', help='use the same scale factor for qkv')
    group.add_argument('--clip-gelu', metavar='N', type=float, help='clip gelu output maximum value to N')
    group.add_argument(
        '--recalibrate-weights', action='store_true',
        help=(
            'recalibrate weight amaxes by taking the max of the weights.'
            ' amaxes will be computed with the current quantization granularity (axis).'
        ))
def lowerCamelCase_ ( _lowerCamelCase : str ):
if args.calibrator == "max":
lowerCamelCase_ = '''max'''
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('''Specify --percentile when using percentile calibrator''' )
lowerCamelCase_ = '''histogram'''
elif args.calibrator == "mse":
lowerCamelCase_ = '''histogram'''
else:
raise ValueError(F"""Invalid calibrator {args.calibrator}""" )
lowerCamelCase_ = QuantDescriptor(num_bits=args.aprec , calib_method=_lowerCamelCase )
lowerCamelCase_ = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(_lowerCamelCase )
quant_nn.QuantLinear.set_default_quant_desc_weight(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : Any=False , _lowerCamelCase : List[Any]=False ):
logger.info('''Configuring Model for Quantization''' )
logger.info(F"""using quantization package {pytorch_quantization.__file__}""" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(_lowerCamelCase , ['''embeddings'''] , which='''weight''' , _disabled=_lowerCamelCase )
if args.quant_disable:
set_quantizer_by_name(_lowerCamelCase , [''''''] , _disabled=_lowerCamelCase )
if args.quant_disable_keyword:
set_quantizer_by_name(_lowerCamelCase , args.quant_disable_keyword , _disabled=_lowerCamelCase )
if args.quant_disable_layer_module:
set_quantizer_by_name(_lowerCamelCase , [r'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=_lowerCamelCase )
if args.quant_enable_layer_module:
set_quantizer_by_name(_lowerCamelCase , [r'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=_lowerCamelCase )
if args.recalibrate_weights:
recalibrate_weights(_lowerCamelCase )
if args.fuse_qkv:
fuse_qkv(_lowerCamelCase , _lowerCamelCase )
if args.clip_gelu:
clip_gelu(_lowerCamelCase , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase : Dict ):
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"""{name:80}: {module}""" )
def lowerCamelCase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] ):
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Any ):
def fusea(_lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : str ):
for mod in [qq, qk, qv]:
if not hasattr(_lowerCamelCase , '''_amax''' ):
print(''' WARNING: NO AMAX BUFFER''' )
return
lowerCamelCase_ = qq._amax.detach().item()
lowerCamelCase_ = qk._amax.detach().item()
lowerCamelCase_ = qv._amax.detach().item()
lowerCamelCase_ = max(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
qq._amax.fill_(_lowerCamelCase )
qk._amax.fill_(_lowerCamelCase )
qv._amax.fill_(_lowerCamelCase )
logger.info(F""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )
for name, mod in model.named_modules():
if name.endswith('''.attention.self''' ):
logger.info(F"""FUSE_QKV: {name:{name_width}}""" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def lowerCamelCase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] ):
for name, mod in model.named_modules():
if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
lowerCamelCase_ = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=_lowerCamelCase )
lowerCamelCase_ = mod._input_quantizer._amax.data.detach().item()
logger.info(F"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def lowerCamelCase_ ( _lowerCamelCase : Any ):
for name, mod in model.named_modules():
if hasattr(_lowerCamelCase , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
lowerCamelCase_ = mod.weight.shape[0]
lowerCamelCase_ = mod._weight_quantizer._amax.detach()
lowerCamelCase_ = torch.ones(_lowerCamelCase , dtype=amax.dtype , device=amax.device ) * amax
print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def lowerCamelCase_ ( _lowerCamelCase : Union[str, Any] ):
for name, mod in model.named_modules():
if hasattr(_lowerCamelCase , '''_weight_quantizer''' ):
if not hasattr(mod.weight_quantizer , '''_amax''' ):
print('''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
lowerCamelCase_ = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
lowerCamelCase_ = set(range(len(mod.weight.size() ) ) ) - axis_set
lowerCamelCase_ = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_lowerCamelCase , keepdims=_lowerCamelCase ).detach()
logger.info(F"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
lowerCamelCase_ = amax
def lowerCamelCase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict=2_5 , _lowerCamelCase : List[Any]=1_8_0 , _lowerCamelCase : str=None ):
if ignore is None:
lowerCamelCase_ = []
elif not isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase_ = [ignore]
lowerCamelCase_ = 0
for name, mod in model.named_modules():
if not hasattr(_lowerCamelCase , '''weight''' ):
continue
lowerCamelCase_ = max(_lowerCamelCase , len(_lowerCamelCase ) )
for name, mod in model.named_modules():
lowerCamelCase_ = getattr(_lowerCamelCase , '''_input_quantizer''' , _lowerCamelCase )
lowerCamelCase_ = getattr(_lowerCamelCase , '''_weight_quantizer''' , _lowerCamelCase )
if not hasattr(_lowerCamelCase , '''weight''' ):
continue
if type(_lowerCamelCase ) in ignore:
continue
if [True for s in ignore if type(_lowerCamelCase ) is str and s in name]:
continue
lowerCamelCase_ = F"""Act:{input_q.extra_repr()}"""
lowerCamelCase_ = F"""Wgt:{weight_q.extra_repr()}"""
lowerCamelCase_ = F"""{name:{name_width}} {act_str} {wgt_str}"""
if len(_lowerCamelCase ) <= line_width:
logger.info(_lowerCamelCase )
else:
logger.info(F"""{name:{name_width}} {act_str}""" )
logger.info(F"""{" ":{name_width}} {wgt_str}""" )
def lowerCamelCase_ ( _lowerCamelCase : int ):
lowerCamelCase_ = 0
for name, mod in model.named_modules():
if isinstance(_lowerCamelCase , pytorch_quantization.nn.TensorQuantizer ):
print(F"""{name:80} {mod}""" )
count += 1
print(F"""{count} TensorQuantizers found in model""" )
def lowerCamelCase_ ( _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple ):
lowerCamelCase_ = getattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if quantizer_mod is not None:
assert hasattr(_lowerCamelCase , _lowerCamelCase )
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
logger.warning(F"""{name} has no {quantizer}""" )
def lowerCamelCase_ ( _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]="both" , **_lowerCamelCase : Union[str, Any] ):
lowerCamelCase_ = F"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(_lowerCamelCase , _lowerCamelCase , '''_input_quantizer''' , _lowerCamelCase , _lowerCamelCase )
if which in ["weight", "both"]:
set_quantizer(_lowerCamelCase , _lowerCamelCase , '''_weight_quantizer''' , _lowerCamelCase , _lowerCamelCase )
logger.info(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict , **_lowerCamelCase : List[str] ):
for name, mod in model.named_modules():
if hasattr(_lowerCamelCase , '''_input_quantizer''' ) or hasattr(_lowerCamelCase , '''_weight_quantizer''' ):
for n in names:
if re.search(_lowerCamelCase , _lowerCamelCase ):
set_quantizers(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
elif name.endswith('''_quantizer''' ):
for n in names:
if re.search(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase_ = F"""Warning: changing {name:{name_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
logger.info(_lowerCamelCase )
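

if __name__ == "__main__":
    # A minimal, hedged demonstration of the calibration workflow above (an
    # addition for illustration, not part of the original training recipe).
    # The tiny Sequential model and argument values are assumptions; note that
    # finish_calibration moves the model to GPU, so a CUDA device is assumed.
    import argparse

    demo_parser = argparse.ArgumentParser()
    add_arguments(demo_parser)
    demo_args = demo_parser.parse_args(["--aprec", "8", "--wprec", "8"])
    set_default_quantizers(demo_args)  # must run before quantized layers are created

    demo_model = torch.nn.Sequential(quant_nn.QuantLinear(16, 16), torch.nn.ReLU(), quant_nn.QuantLinear(16, 4))
    enable_calibration(demo_model)
    for _ in range(4):  # feed a few batches so the calibrators can record ranges
        demo_model(torch.randn(8, 16))
    finish_calibration(demo_model, demo_args)
    configure_model(demo_model, demo_args)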
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )


@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths
        # and need different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results


if __name__ == "__main__":
    main()
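# A hedged usage sketch (an addition): this script is normally launched from
# the command line; the flag values below are illustrative, not taken from
# the original.
#
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-common-voice \
#       --do_train --do_eval --fp16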
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
"""LRU Cache implementation backed by a doubly linked list (O(1) get/put)."""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for the LRU cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list built specifically for the LRU cache."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Add a node to the rear of the list (most recently used side)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Unlink a node from the list; returns None if the node was not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU Cache to store a given capacity of data."""

    # class variable mapping decorated functions to their cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0;
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of the LRU cache (single-argument functions)."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
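
if __name__ == "__main__":
    # A small usage demonstration of the decorator interface (an addition for
    # illustration, not part of the original module): memoizing a recursive
    # Fibonacci function.
    @LRUCache.decorator(100)
    def fib(num: int) -> int:
        if num in (1, 2):
            return 1
        return fib(num - 1) + fib(num - 2)

    print(fib(30))            # each distinct argument is computed only once
    print(fib.cache_info())   # type: ignore[attr-defined]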
"""PIL image conversion utilities."""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
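

if __name__ == "__main__":
    # A hedged usage sketch (an addition): converting a batch of tensors in
    # [-1, 1], shaped (batch, channels, height, width), to PIL images. The
    # random tensor stands in for a real model output.
    import torch

    fake_batch = torch.rand(2, 3, 64, 64) * 2 - 1
    demo_images = pt_to_pil(fake_batch)
    print(demo_images[0].size)  # (64, 64)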
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        encoder_layers=12,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6_000,
        max_target_positions=1_024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1_024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
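

if __name__ == "__main__":
    # A minimal sketch (an addition): a small configuration round-trip. The
    # reduced layer counts are arbitrary illustration values.
    small_config = Speech2TextConfig(encoder_layers=2, decoder_layers=2, d_model=64)
    print(small_config.hidden_size)  # attribute_map routes hidden_size -> d_model: 64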
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=2_560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10_000,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31_996,
        eos_token_id=31_999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
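

if __name__ == "__main__":
    # A minimal sketch (an addition): serializing the config to a dict. The
    # reduced sizes are arbitrary illustration values.
    cfg = GPTNeoXJapaneseConfig(hidden_size=512, num_hidden_layers=4)
    as_dict = cfg.to_dict()
    assert as_dict["hidden_size"] == 512 and as_dict["model_type"] == "gpt_neox_japanese"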
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
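
# A hedged note (an addition): with the lazy module installed above, an
# attribute access such as
#
#     from transformers.models.cpmant import CpmAntConfig
#
# triggers the actual import of `configuration_cpmant` on first use, keeping
# the initial package import cheap.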
"""Tests for the ProphetNet tokenizer."""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
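

# A minimal usage sketch (an addition, not part of the test suite):
#
#   tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
#   enc = tokenizer("A long paragraph for summarization.", return_tensors="pt")
#   print(enc.input_ids.shape)   # (1, sequence_length)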
""" PyTorch PoolFormer model."""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (stochastic depth) per sample, applied in the main path of residual blocks.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
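

# A hedged numeric illustration (an addition): with drop_prob=0.5 in training
# mode, each sample in the batch is either zeroed entirely or scaled by
# 1 / keep_prob = 2.0, so the expected value of the output matches the input:
#
#   x = torch.ones(8, 4)
#   y = drop_path(x, drop_prob=0.5, training=True)
#   # every row of y is all zeros or all 2.0, with equal probability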
class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class PoolFormerEmbeddings(nn.Module):
    """
    Construct patch embeddings.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings


class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group normalization with a single group. Input: tensor of shape [B, C, H, W].
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # subtract the input so the block models only the residual
        return self.pool(hidden_states) - hidden_states


class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states


class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs


class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value


POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""


@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__UpperCamelCase , hidden_states=encoder_outputs.hidden_states , )
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __UpperCamelCase ) -> str:
super().__init__()
_a = nn.Linear(config.hidden_size , config.hidden_size )
def a_ ( self , __UpperCamelCase ) -> Optional[int]:
_a = self.dense(__UpperCamelCase )
return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
''' , lowerCamelCase__ , )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self , __UpperCamelCase ) -> Optional[Any]:
super().__init__(__UpperCamelCase )
_a = config.num_labels
_a = PoolFormerModel(__UpperCamelCase )
# Final norm
_a = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_a = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a_ ( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
_a = return_dict if return_dict is not None else self.config.use_return_dict
_a = self.poolformer(
__UpperCamelCase , output_hidden_states=__UpperCamelCase , return_dict=__UpperCamelCase , )
_a = outputs[0]
_a = self.classifier(self.norm(__UpperCamelCase ).mean([-2, -1] ) )
_a = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_a = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_a = "single_label_classification"
else:
_a = "multi_label_classification"
if self.config.problem_type == "regression":
_a = MSELoss()
if self.num_labels == 1:
_a = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_a = loss_fct(__UpperCamelCase , __UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
_a = CrossEntropyLoss()
_a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_a = BCEWithLogitsLoss()
_a = loss_fct(__UpperCamelCase , __UpperCamelCase )
if not return_dict:
_a = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__UpperCamelCase , logits=__UpperCamelCase , hidden_states=outputs.hidden_states )
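
# A minimal usage sketch of the classification model above, assuming the
# transformers library is installed and `image` is a PIL image; the checkpoint
# name is illustrative (sail/poolformer_s12 is one published PoolFormer checkpoint):
#
#   from transformers import AutoImageProcessor, PoolFormerForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # shape (batch_size, num_labels)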
"""Nightly integration tests for the VersatileDiffusion text-to-image pipeline."""

import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    """Tokenize one dataset example and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between the rows of `a` and the rows of `b`."""
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    """Map each RGB pixel in `x` to the index of its nearest color cluster."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
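
# A small sanity check of the two helpers above (illustrative values):
#
#   a = np.array([[0.0, 0.0, 0.0]])          # one black pixel
#   clusters = np.array([[0, 0, 0], [255, 255, 255]])
#   squared_euclidean_distance(a, clusters)  # -> [[0., 195075.]]  (3 * 255**2)
#   color_quantize(a, clusters)              # -> [0], the nearest cluster index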
class ImageGPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs an ImageGPT image processor. The processor resizes images to a fixed resolution, normalizes pixel
    values to the range [-1, 1] and, optionally, color-quantizes pixels into "color cluster" token ids.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Normalize an image's pixel values to the range [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess an image or batch of images."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
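
# Illustrative usage, assuming the processor config is available under an existing
# checkpoint (e.g. "openai/imagegpt-small") and `image` is a PIL image:
#
#   processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
#   encoding = processor(images=image, return_tensors="pt")
#   encoding["input_ids"].shape  # (1, height * width) color-cluster ids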
_UpperCAmelCase = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_UpperCAmelCase = [{"type": "code", "content": INSTALL_CONTENT}]
_UpperCAmelCase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle a list in place by repeatedly swapping two randomly chosen positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
import os
from pathlib import Path


def load_cuda_kernels():
    """Compile and load the custom multi-scale deformable attention CUDA kernels."""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
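
# Minimal usage sketch (requires a CUDA toolchain; compilation happens on first call).
# The exported symbol names below are assumptions about the compiled extension:
#
#   MSDA = load_cuda_kernels()
#   # MSDA.ms_deform_attn_forward / MSDA.ms_deform_attn_backward are then
#   # available to the DeformableDetr attention modules.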
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about the current environment."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    """Return `username/model_id` (or `organization/model_id`) for pushing to the Hub."""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    """Render a model card from the template and save it as README.md in the output directory."""
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    """Extract the commit hash from the path of a resolved cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
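
# Illustrative (hypothetical path): for a resolved cache file such as
#   ".../models--user--repo/snapshots/0123abcd.../unet/config.json"
# the function returns the "0123abcd..." part, provided it matches REGEX_COMMIT_HASH.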
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    """Move cached blobs from the old diffusers cache layout to the new one, leaving symlinks behind."""
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    """Insert a variant tag before the file extension of a weights filename."""
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
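
# For example (behaviour follows directly from the split/join above):
#   _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"
#   _add_variant("diffusion_pytorch_model.bin")         -> "diffusion_pytorch_model.bin"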
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    """Resolve `weights_name` from a local file/directory, or download it from the Hub."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the timm model's weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowercase : Optional[int] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_lowercase : Tuple =parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
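
# Example invocation (the script filename and the output path are illustrative;
# the timm model name must exist in the timm registry):
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224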
"""
Implementation of the MD5 message-digest algorithm (RFC 1321). MD5 is considered
cryptographically broken and should only be used for non-security purposes.
"""

from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string from big-endian to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as the hex digits of its four little-endian bytes."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message to a bit string whose length is a multiple of 512, appending the message length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the preprocessed bit string into 512-bit blocks of sixteen 32-bit little-endian words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit integer to the left."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-character MD5 hash of `message` as bytes of hex digits."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
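
# Well-known MD5 test vectors that this implementation reproduces:
#   md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"
#   md5_me(b"The quick brown fox jumps over the lazy dog") == b"9e107d9d372bb6826bd81d3542a419d6"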
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb a staircase of `number_of_steps` steps, taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
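
# For example, a staircase of three steps can be climbed in three ways
# (1+1+1, 1+2, 2+1), so climb_stairs(3) == 3.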
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
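
# Example invocation (paths and the script filename are illustrative; the csv must
# be tab-separated with "title" and "text" columns):
#
#   python use_own_knowledge_dataset.py \
#       --csv_path path/to/my_csv.csv \
#       --output_dir path/to/my_knowledge_dataset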
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string to Ascii85 bytes."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
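
# Round-trip property that follows from the definitions above:
#   base85_decode(base85_encode("some text")) == "some text"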
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """
    Count the square laminae that can be formed using up to `limit` tiles. A lamina with
    outer side `outer_width` and square hole of side `hole_width` (same parity,
    hole_width >= 1) uses outer_width**2 - hole_width**2 tiles.
    """
    answer = 0

    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
warnings.warn(
'''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use FlavaImageProcessor instead.''' , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''' , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViT model."""

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
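
# Illustrative usage of the two classes above (the ViTOnnxConfig constructor call
# assumes the default OnnxConfig signature):
#
#   config = ViTConfig()                 # ViT-Base defaults (12 layers, hidden size 768)
#   onnx_config = ViTOnnxConfig(config)
#   list(onnx_config.inputs)             # -> ["pixel_values"]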
import os
import time

import numpy as np
import onnxruntime as ort


# TensorRT execution-provider settings (int8 precision, calibration table, engine cache)
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
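# Example invocation (hypothetical TPU name and zone), assuming this parser is
# registered as the `accelerate tpu-config` subcommand:
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central2-b \
#       --command "python train.py" --install_accelerate --debug
#
# With --debug the assembled `gcloud compute tpus tpu-vm ssh ...` command is
# printed instead of executed.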
| 298
| 0
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
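# The deprecated path still resolves while warning; per the message above, new
# code should import from the new location instead:
#
#   from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput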
| 597
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
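# Usage sketch: the derived properties follow from the conv stack, e.g.
#   config = WavLMConfig()
#   config.num_feat_extract_layers  -> 7 (one per conv_dim entry)
#   config.inputs_to_logits_ratio   -> 320 (product of conv_stride: 5 * 2**6)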
| 597
| 1
|
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
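# Minimal usage sketch: the decorator retries the wrapped function, halving
# `batch_size` on every CUDA OOM until the call succeeds (names below are
# illustrative, not from this test file):
#
#   @find_executable_batch_size(starting_batch_size=256)
#   def training_loop(batch_size):
#       loader = DataLoader(dataset, batch_size=batch_size)
#       for batch in loader:
#           ...
#
#   training_loop()  # called with no args; batch_size is injected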
| 34
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 187
| 0
|
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    # A scalar fingerprint of the model weights, used to check save/load round-trips.
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)


class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            accelerator = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerate_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
| 716
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version in MAJOR.MINOR.PATCH format."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from a version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Reconstruct a version string from a (major, minor, patch) tuple."""
    return ".".join(str(v) for v in version_tuple)
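# Usage sketch: comparisons are numeric per component, not lexicographic:
#   Version("1.2.3") < "1.10.0"    -> True
#   Version("1.2.3") == "1.2.3"    -> True
#   _str_to_version_tuple("2.0.1") -> (2, 0, 1)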
| 479
| 0
|
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a fixed-size circular doubly linked list."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front = None
        self.rear = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data = None
        self.next = None
        self.prev = None
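# Usage sketch:
#   queue = CircularQueueLinkedList(initial_capacity=3)
#   queue.enqueue("a"); queue.enqueue("b")
#   queue.first()    # -> "a"
#   queue.dequeue()  # -> "a"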
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 586
| 0
|
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    """Knuth-Morris-Pratt prefix function: prefix_result[i] is the length of the
    longest proper prefix of input_string[: i + 1] that is also its suffix."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Length of the longest prefix that is also a suffix of some prefix."""
    return max(prefix_function(input_string))
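# Worked example: for "aabaaab" the prefix function is [0, 1, 0, 1, 2, 2, 3],
# so longest_prefix("aabaaab") == 3 (the border "aab").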
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707
|
"""simple docstring"""
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Real (active) power P = S * cos(phi), where power_factor = cos(phi)."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Reactive power Q = S * sin(phi) = S * sqrt(1 - cos(phi)**2)."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
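# Worked example: a 100 VA load at power factor 0.9 draws
#   real_power(100, 0.9)     -> 90.0 W
#   reactive_power(100, 0.9) -> 100 * sqrt(1 - 0.81) ~= 43.59 var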
if __name__ == "__main__":
import doctest
doctest.testmod()
| 480
| 0
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 235
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder, used to scale
    image embeddings before noising and unscale them afterwards. (Class name
    assumed from the diffusers stable unCLIP normalizer pattern.)
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
| 235
| 1
|
"""simple docstring"""
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
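# Why the demo below works: for prime p and b not divisible by p, Fermat's
# little theorem gives b**(p-2) == b**(-1) (mod p), so (a / b) % p can be
# computed as (a * binary_exponentiation(b, p - 2, p)) % p.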
# a prime number
p = 701
a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 705
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 612
| 0
|
"""simple docstring"""
def present_value(discount_rate: float, cash_flows: list) -> float:
    """Net present value of a series of cash flows at a constant discount rate."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
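# Worked example: cash flows of 100 now and 110 in one year at a 10% rate:
#   present_value(0.1, [100, 110]) -> 100 + 110 / 1.1 = 200.0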
if __name__ == "__main__":
import doctest
doctest.testmod()
| 453
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
| 453
| 1
|
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 425
|
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
lowerCAmelCase : List[str] = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase : List[str] = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
lowerCAmelCase : List[Any] = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
lowerCAmelCase : Optional[Any] = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowerCAmelCase : str = load_file(unet_path, device="""cpu""")
else:
lowerCAmelCase : List[str] = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
lowerCAmelCase : Any = torch.load(unet_path, map_location="""cpu""")
if osp.exists(vae_path):
lowerCAmelCase : Dict = load_file(vae_path, device="""cpu""")
else:
lowerCAmelCase : Optional[int] = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
lowerCAmelCase : Dict = torch.load(vae_path, map_location="""cpu""")
if osp.exists(text_enc_path):
lowerCAmelCase : List[Any] = load_file(text_enc_path, device="""cpu""")
else:
lowerCAmelCase : Optional[Any] = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
lowerCAmelCase : Optional[Any] = torch.load(text_enc_path, map_location="""cpu""")
# Convert the UNet model
lowerCAmelCase : Any = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase : Union[str, Any] = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase : Any = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase : Any = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase : Optional[Any] = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase : Optional[Any] = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase : List[str] = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase : Any = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase : str = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase : Optional[Any] = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase : List[str] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase : Dict = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase : Any = {"""state_dict""": state_dict}
torch.save(state_dict, args.checkpoint_path)
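# Example invocation (a sketch: the paths and the script filename below are
# illustrative, not taken from this file):
#
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model \
#       --checkpoint_path ./model.safetensors --half --use_safetensors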
| 425
| 1
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Lower-upper (LU) decomposition of a square matrix via Doolittle's method."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
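    # Quick sanity check (a sketch, not part of the original module): the factors
    # of a small matrix should multiply back to the input.
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)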
| 625
|
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
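# Minimal usage sketch (illustrative, not part of the original module): hide the
# cursor while printing a transient status line, restoring it on exit.
if __name__ == "__main__":
    import time

    with hide():
        sys.stdout.write("working...")
        sys.stdout.flush()
        time.sleep(1)
    print("\ndone")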
| 406
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the tokenizer's init options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
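# Usage sketch (assumes network access to the "google/mobilebert-uncased" checkpoint):
#
#   tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   ids_a = tokenizer("hello world", add_special_tokens=False)["input_ids"]
#   ids_b = tokenizer("how are you", add_special_tokens=False)["input_ids"]
#   input_ids = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
#   token_type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)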
| 721
|
'''simple docstring'''
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = DownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Any = "down"
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = ResnetDownsampleBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Optional[Any] = "down"
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = AttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Dict = "down"
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = CrossAttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : List[Any] = "down"
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : List[str] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : str = 3_2
return init_dict, inputs_dict
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = SimpleCrossAttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Optional[int] = "down"
@property
def A_ ( self ):
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : int = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : List[str] = 3_2
return init_dict, inputs_dict
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = SkipDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : str = "down"
@property
def A_ ( self ):
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = AttnSkipDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Tuple = "down"
@property
def A_ ( self ):
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = DownEncoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : List[str] = "down"
@property
def A_ ( self ):
'''simple docstring'''
return super().get_dummy_input(include_temb=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = {
"in_channels": 3_2,
"out_channels": 3_2,
}
UpperCAmelCase : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = AttnDownEncoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : str = "down"
@property
def A_ ( self ):
'''simple docstring'''
return super().get_dummy_input(include_temb=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = {
"in_channels": 3_2,
"out_channels": 3_2,
}
UpperCAmelCase : str = self.dummy_input
return init_dict, inputs_dict
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = UNetMidBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Any = "mid"
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = {
"in_channels": 3_2,
"temb_channels": 1_2_8,
}
UpperCAmelCase : str = self.dummy_input
return init_dict, inputs_dict
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = UNetMidBlockaDCrossAttn # noqa F405
SCREAMING_SNAKE_CASE__ : Optional[int] = "mid"
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : List[Any] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : Optional[Any] = 3_2
return init_dict, inputs_dict
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = UNetMidBlockaDSimpleCrossAttn # noqa F405
SCREAMING_SNAKE_CASE__ : Dict = "mid"
@property
def A_ ( self ):
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : List[str] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : Union[str, Any] = 3_2
return init_dict, inputs_dict
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = UpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : List[Any] = "up"
@property
def A_ ( self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = ResnetUpsampleBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : List[Any] = "up"
@property
def A_ ( self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = CrossAttnUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Any = "up"
@property
def A_ ( self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Any = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : Any = 3_2
return init_dict, inputs_dict
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = SimpleCrossAttnUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : str = "up"
@property
def A_ ( self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=snake_case , include_encoder_hidden_states=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : List[Any] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : Optional[int] = 3_2
return init_dict, inputs_dict
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = AttnUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Any = "up"
@property
def A_ ( self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=snake_case )
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = SkipUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Optional[Any] = "up"
@property
def A_ ( self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AttnSkipUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Any = "up"
@property
def A_ ( self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = UpDecoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Dict = "up"
@property
def A_ ( self ):
'''simple docstring'''
return super().get_dummy_input(include_temb=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = {"in_channels": 3_2, "out_channels": 3_2}
UpperCAmelCase : Any = self.dummy_input
return init_dict, inputs_dict
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(snake_case )
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = AttnUpDecoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Tuple = "up"
@property
def A_ ( self ):
'''simple docstring'''
return super().get_dummy_input(include_temb=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = {"in_channels": 3_2, "out_channels": 3_2}
UpperCAmelCase : int = self.dummy_input
return init_dict, inputs_dict
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(snake_case )
| 609
| 0
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
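# Example launches (flags and the script name are illustrative, not from this file):
#   python ./local_sgd.py --local_sgd_steps 8               # single CPU or GPU
#   accelerate launch ./local_sgd.py --local_sgd_steps 8    # distributed, after `accelerate config`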
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/validation `DataLoader`s for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 642
|
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name out of a given date using the Doomsday algorithm."""
    # minimal input checks:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7
    # A year is a leap year when it is divisible by 4, except century years,
    # which are leap only when divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
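    # Spot checks (a sketch, not part of the original module): 2000-01-01 was a
    # Saturday and 2024-03-14 was a Thursday.
    assert get_week_day(2000, 1, 1) == "Saturday"
    assert get_week_day(2024, 3, 14) == "Thursday"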
| 55
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Tuple =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : int =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[str] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[str] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[str] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : int =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Tuple =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Tuple =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Any =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : int =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Any =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Tuple =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[Any] =["""sentencepiece"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["sentencepiece"] )
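# How these placeholders behave (illustrative; the original class names were lost
# in this dump): the `DummyObject` metaclass routes instantiation and attribute
# access through `requires_backends`, which raises an ImportError explaining that
# the `sentencepiece` library must be installed to use the real class.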
| 282
|
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__( self ):
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype = np.dtype(dtype_byteorder + dtype_kind + str(dtype_itemsize))
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an extension array."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
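# Usage sketch (assumes `datasets` and Pillow are installed; the file path is
# illustrative): cast a column of image file paths to the Image feature.
#
#   from datasets import Dataset, Image
#   ds = Dataset.from_dict({"image": ["path/to/cat.png"]}).cast_column("image", Image())
#   ds[0]["image"]  # decoded lazily as a PIL.Image.Image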
| 282
| 1
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """Copy/paste/tweak the checkpoint's weights to the Transformers design."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
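# Example invocation (a sketch: the checkpoint filename and script name below are
# illustrative, not taken from this file):
#
#   python convert_groupvit_nvlab_to_hf.py \
#       --checkpoint_path ./groupvit_gcc_yfcc.pth \
#       --model_name groupvit-gcc-yfcc --pytorch_dump_folder_path ./groupvit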
| 18
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i):
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
F'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 509
| 0
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
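
# A short illustration (not part of the test file) of the quantization that
# makes the processor return LongTensor ids above: assuming ImageGPT's usual
# nearest-color-cluster mapping, each normalized RGB pixel becomes the id of
# the closest cluster. The two clusters mirror the test dict further up.
import numpy as np

clusters = np.asarray(
    [
        [0.8866, 0.6618, 0.3891],
        [-0.6042, -0.0229, 0.5423],
    ]
)
pixels = np.asarray([[0.9, 0.7, 0.4], [-0.5, 0.0, 0.5]])  # two example pixels
distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
print(distances.argmin(axis=1))  # [0 1] -> per-pixel token ids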
| 714
|
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """simple docstring"""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
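
# Small usage illustration (not in the original script): a 2D Flax "kernel"
# becomes a transposed PyTorch "weight".
_key, _tensor = rename_base_flax_keys(("encoder", "dense", "kernel"), torch.ones(3, 4))
print(_key)           # ('encoder', 'dense', 'weight')
print(_tensor.shape)  # torch.Size([4, 3])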
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """simple docstring"""
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
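
# Hypothetical example of the parsing above (values are illustrative): a
# ".../kvstore/driver" entry is resolved to the literal driver name "file".
_name, _parts, _content = get_key_and_tensorstore_dict(
    "target/encoder/kvstore/driver", {"target/encoder/kvstore/driver": "zarr"}, "/tmp/ckpt"
)
print(_name)     # target/encoder
print(_content)  # file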
def a__ ( a : List[Any] , a : List[Any] ):
"""simple docstring"""
_snake_case : Union[str, Any] = rename_keys(a )
_snake_case : int = {}
for k, v in current_block.items():
_snake_case : Optional[int] = v
_snake_case : Optional[int] = new_current_block
torch.save(a , a )
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """simple docstring"""
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
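
# For reference, the shard files created above follow the standard HF naming
# scheme; a quick illustration (not part of the conversion logic):
print(WEIGHTS_NAME.replace(".bin", f"-{1:05d}-of-{3:05d}.bin"))
# pytorch_model-00001-of-00003.bin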
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 87
| 0
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds the root of `func`, starting from the point `a`, by the Newton-Raphson method."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find root of log(x) - 1 = 0 (i.e. x = e)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
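
# The update implemented above is the classic Newton-Raphson step
#     x_{n+1} = x_n - f(x_n) / f'(x_n)
# A minimal self-contained sketch without eval/sympy, for f(x) = x**2 - 2
# (whose positive root is sqrt(2)):
def newton_sqrt2(x: float = 1.0, precision: float = 1e-10) -> float:
    while abs(x * x - 2) >= precision:
        x -= (x * x - 2) / (2 * x)  # f(x) = x**2 - 2, f'(x) = 2 * x
    return x

print(f"sqrt(2) by hand-rolled Newton-Raphson: {newton_sqrt2()}")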
| 80
|
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 591
| 0
|
'''simple docstring'''
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    """simple docstring"""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    """simple docstring"""
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
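
# Tiny usage sketch (not part of the module): identical expected/recorded
# mappings pass silently; any mismatch raises NonMatchingChecksumError.
_record = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "abc"}}
verify_checksums(_record, _record, verification_name="dataset infos")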
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    """simple docstring"""
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file"""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size) -> bool:
    """simple docstring"""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
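
# Quick usage sketch of get_size_checksum_dict (the temporary file is
# illustrative, not part of the module):
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as _tmp:
    _tmp.write(b"hello")
print(get_size_checksum_dict(_tmp.name))
# {'num_bytes': 5, 'checksum': '2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824'}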
| 717
|
'''simple docstring'''
__all__ = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 79
| 0
|
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Returns the biggest number obtainable by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_string = str(abs(num))
        num_transpositions = [list(num_string) for char in range(len(num_string))]
        for index in range(len(num_string)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__("doctest").testmod()
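
# Worked examples (illustrative): removing one digit from 152 can yield
# 52, 12 or 15, so the maximum is 52; the sign is dropped via abs() above.
print(remove_digit(152))   # 52
print(remove_digit(-100))  # 10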
| 38
|
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    '''simple docstring'''

    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False
    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks=None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
@property
def __UpperCamelCase ( self ):
if self._dummy_file is None:
snake_case__ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def __UpperCamelCase ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Optional[int] = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __UpperCamelCase ( self ):
if self._bucket_url is None:
snake_case__ : List[str] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __UpperCamelCase ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return path
def __UpperCamelCase ( self ):
return {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
snake_case__ : List[Any] = single_urls
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
snake_case__ : Optional[int] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
snake_case__ : List[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : List[str] = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
def _iter_archive_members(__SCREAMING_SNAKE_CASE ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : List[str] = Path(self.dummy_file ).parent
snake_case__ : Dict = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = Path(__SCREAMING_SNAKE_CASE )
snake_case__ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open("""rb""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = [paths]
for path in paths:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__SCREAMING_SNAKE_CASE ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
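
# Sketch (illustrative paths) of the URL-to-dummy-file convention used by the
# manager above: the last path component of a URL, percent-encoded with
# urllib.parse.quote_plus, becomes the file name inside the dummy_data folder.
print(os.path.join("/tmp/dummy_data", urllib.parse.quote_plus("train.json?version=2")))
# /tmp/dummy_data/train.json%3Fversion%3D2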
| 38
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 477
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__SCREAMING_SNAKE_CASE ="2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 477
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = torch.device("""cpu""")
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
"""simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    """simple docstring"""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """simple docstring"""
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
__lowerCAmelCase = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
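
# Small illustration (hypothetical key) of the renaming rules implemented in
# create_rename_keys above:
for _old, _new in create_rename_keys({"network.0.1.pwconv.weight": None}):
    print(_old, "->", _new)
# network.0.1.pwconv.weight -> swiftformer.encoder.network.0.blocks.1.point_wise_conv.weight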
| 229
|
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return the full path if it is a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
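
# Illustrative usage sketch (not part of the original module; the dataset name,
# version and URLs below are hypothetical). A dataset test would construct the
# mock manager and hand it to a builder in place of a real download manager:
#
#   dl_manager = MockDownloadManager("squad", config=None, version="1.0.0")
#   paths = dl_manager.download_and_extract(
#       {"train": "https://example.com/train.json", "dev": "https://example.com/dev.json"}
#   )
#   # `paths` maps each key to a file inside the dummy_data.zip directory tree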
| 229
| 1
|
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
lowerCAmelCase__ = """▁"""
class T5Tokenizer(PreTrainedTokenizer):
    """Construct a T5 tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user has already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
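
# Illustrative usage sketch (not part of the original module; the local
# SentencePiece file path is hypothetical):
#
#   tokenizer = T5Tokenizer("spiece.model")
#   ids = tokenizer("translate English to German: hello").input_ids
#   text = tokenizer.decode(ids)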
| 648
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
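
# Illustrative sketch (not part of the original conftest): with IGNORE_RESULT
# registered above, a doctest can run a statement whose printed result is
# irrelevant or non-deterministic without failing the output comparison:
#
#   >>> import torch
#   >>> torch.manual_seed(0)  # doctest: +IGNORE_RESULT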
| 648
| 1
|
def print_max_activities(start, finish):
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
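
# For the inputs above the greedy scan keeps activity 0 and then every activity
# whose start time is at or after the finish time of the last selected one, so
# the script prints:
#
#   The following activities are selected:
#   0,1,3,4,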
| 89
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
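
# Illustrative usage sketch (not part of the original module): the config can
# be instantiated with defaults and individual fields overridden.
#
#   config = ASTConfig(num_mel_bins=64)
#   assert config.hidden_size == 768 and config.num_mel_bins == 64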
| 382
| 0
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _A ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase="text" ):
if model_type == "text":
_UpperCAmelCase : Optional[Any] = BarkSemanticModel
_UpperCAmelCase : str = BarkSemanticConfig
_UpperCAmelCase : Dict = BarkSemanticGenerationConfig
elif model_type == "coarse":
_UpperCAmelCase : Optional[Any] = BarkCoarseModel
_UpperCAmelCase : int = BarkCoarseConfig
_UpperCAmelCase : Any = BarkCoarseGenerationConfig
elif model_type == "fine":
_UpperCAmelCase : List[str] = BarkFineModel
_UpperCAmelCase : Any = BarkFineConfig
_UpperCAmelCase : int = BarkFineGenerationConfig
else:
raise NotImplementedError()
_UpperCAmelCase : Union[str, Any] = F'''{model_type}_small''' if use_small else model_type
_UpperCAmelCase : Union[str, Any] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(_UpperCamelCase ):
logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info['''repo_id'''] , model_info['''file_name'''] )
_UpperCAmelCase : List[str] = torch.load(_UpperCamelCase , map_location=_UpperCamelCase )
# this is a hack
_UpperCAmelCase : Union[str, Any] = checkpoint['''model_args''']
if "input_vocab_size" not in model_args:
_UpperCAmelCase : List[Any] = model_args['''vocab_size''']
_UpperCAmelCase : int = model_args['''vocab_size''']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_UpperCAmelCase : Any = model_args.pop('''n_head''' )
_UpperCAmelCase : Optional[Any] = model_args.pop('''n_embd''' )
_UpperCAmelCase : Union[str, Any] = model_args.pop('''n_layer''' )
_UpperCAmelCase : Dict = ConfigClass(**checkpoint['''model_args'''] )
_UpperCAmelCase : str = ModelClass(config=_UpperCamelCase )
_UpperCAmelCase : Optional[int] = GenerationConfigClass()
_UpperCAmelCase : str = model_generation_config
_UpperCAmelCase : List[str] = checkpoint['''model''']
# fixup checkpoint
_UpperCAmelCase : Dict = '''_orig_mod.'''
for k, v in list(state_dict.items() ):
if k.startswith(_UpperCamelCase ):
# replace part of the key with corresponding layer name in HF implementation
_UpperCAmelCase : int = k[len(_UpperCamelCase ) :]
for old_layer_name in new_layer_name_dict:
_UpperCAmelCase : Dict = new_k.replace(_UpperCamelCase , new_layer_name_dict[old_layer_name] )
_UpperCAmelCase : Optional[int] = state_dict.pop(_UpperCamelCase )
_UpperCAmelCase : Dict = set(state_dict.keys() ) - set(model.state_dict().keys() )
_UpperCAmelCase : Optional[Any] = {k for k in extra_keys if not k.endswith('''.attn.bias''' )}
_UpperCAmelCase : int = set(model.state_dict().keys() ) - set(state_dict.keys() )
_UpperCAmelCase : Optional[Any] = {k for k in missing_keys if not k.endswith('''.attn.bias''' )}
if len(_UpperCamelCase ) != 0:
raise ValueError(F'''extra keys found: {extra_keys}''' )
if len(_UpperCamelCase ) != 0:
raise ValueError(F'''missing keys: {missing_keys}''' )
model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
_UpperCAmelCase : Optional[int] = model.num_parameters(exclude_embeddings=_UpperCamelCase )
_UpperCAmelCase : Dict = checkpoint['''best_val_loss'''].item()
logger.info(F'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(_UpperCamelCase , 3 )} loss''' )
model.eval()
model.to(_UpperCamelCase )
del checkpoint, state_dict
return model
def _A ( _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_UpperCAmelCase : List[Any] = '''cpu''' # do conversion on cpu
_UpperCAmelCase : List[Any] = _get_ckpt_path(_UpperCamelCase , use_small=_UpperCamelCase )
_UpperCAmelCase : Optional[Any] = _load_model(_UpperCamelCase , _UpperCamelCase , model_type=_UpperCamelCase , use_small=_UpperCamelCase )
# load bark initial model
_UpperCAmelCase : List[str] = _bark_load_model(_UpperCamelCase , '''cpu''' , model_type=_UpperCamelCase , use_small=_UpperCamelCase )
if model_type == "text":
_UpperCAmelCase : Optional[Any] = bark_model['''model''']
if model.num_parameters(exclude_embeddings=_UpperCamelCase ) != bark_model.get_num_params():
raise ValueError('''initial and new models don\'t have the same number of parameters''' )
# check if same output as the bark model
_UpperCAmelCase : Optional[int] = 5
_UpperCAmelCase : Optional[int] = 10
if model_type in ["text", "coarse"]:
_UpperCAmelCase : Dict = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
_UpperCAmelCase : Optional[Any] = bark_model(_UpperCamelCase )[0]
_UpperCAmelCase : Optional[int] = model(_UpperCamelCase )
# take last logits
_UpperCAmelCase : str = output_new_model_total.logits[:, [-1], :]
else:
_UpperCAmelCase : Optional[Any] = 3
_UpperCAmelCase : Dict = 8
_UpperCAmelCase : Union[str, Any] = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
_UpperCAmelCase : str = model(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase : Tuple = bark_model(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase : str = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('''initial and new outputs don\'t have the same shape''' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('''initial and new outputs are not equal''' )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
_UpperCAmelCase : Tuple = os.path.join(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase : Tuple = BarkSemanticConfig.from_pretrained(os.path.join(_UpperCamelCase , '''config.json''' ) )
_UpperCAmelCase : Optional[int] = BarkCoarseConfig.from_pretrained(os.path.join(_UpperCamelCase , '''config.json''' ) )
_UpperCAmelCase : Any = BarkFineConfig.from_pretrained(os.path.join(_UpperCamelCase , '''config.json''' ) )
_UpperCAmelCase : List[str] = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' )
_UpperCAmelCase : Optional[int] = BarkSemanticModel.from_pretrained(_UpperCamelCase )
_UpperCAmelCase : Any = BarkCoarseModel.from_pretrained(_UpperCamelCase )
_UpperCAmelCase : Dict = BarkFineModel.from_pretrained(_UpperCamelCase )
_UpperCAmelCase : List[str] = EncodecModel.from_pretrained('''facebook/encodec_24khz''' )
_UpperCAmelCase : str = BarkConfig.from_sub_model_configs(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase : Dict = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
_UpperCAmelCase : Union[str, Any] = BarkModel(_UpperCamelCase )
_UpperCAmelCase : Any = semantic
_UpperCAmelCase : List[Any] = coarseAcoustic
_UpperCAmelCase : List[str] = fineAcoustic
_UpperCAmelCase : List[Any] = codec
_UpperCAmelCase : Optional[int] = bark_generation_config
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
bark.save_pretrained(_UpperCamelCase , repo_id=_UpperCamelCase , push_to_hub=_UpperCamelCase )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
UpperCAmelCase__ : Any = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
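
# Illustrative usage sketch (not part of the original script; the script
# filename is hypothetical). Converting the small text model:
#
#   python convert_suno_to_hf.py text ./bark_text_small --is_small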
| 416
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
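
# Worked examples (these follow directly from the definition of the Moebius
# function): mobius(4) == 0 because 4 = 2 * 2 is not square-free; mobius(7) == -1
# because it has an odd number (one) of prime factors; mobius(6) == 1 because
# 6 = 2 * 3 has an even number of prime factors.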
| 416
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCAmelCase : Any = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
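
# Illustrative usage sketch (not part of the original module): the lazy module
# defers the heavy torch import until an attribute is actually accessed, e.g.
#
#   from transformers.models.unispeech import UniSpeechConfig
#   config = UniSpeechConfig()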
| 193
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
"""simple docstring"""
for attribute in key.split('.' ):
lowerCAmelCase__ = getattr(snake_case__ , snake_case__ )
if weight_type is not None:
lowerCAmelCase__ = getattr(snake_case__ , snake_case__ ).shape
else:
lowerCAmelCase__ = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
lowerCAmelCase__ = value
elif weight_type == "weight_g":
lowerCAmelCase__ = value
elif weight_type == "weight_v":
lowerCAmelCase__ = value
elif weight_type == "bias":
lowerCAmelCase__ = value
else:
lowerCAmelCase__ = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = fairseq_model.state_dict()
lowerCAmelCase__ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowerCAmelCase__ = None
for name, value in fairseq_dict.items():
lowerCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == 'group' , )
lowerCAmelCase__ = True
elif name.split('.' )[0] == "proj":
lowerCAmelCase__ = fairseq_model.proj
lowerCAmelCase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowerCAmelCase__ = True
if "*" in mapped_key:
lowerCAmelCase__ = name.split(snake_case__ )[0].split('.' )[-2]
lowerCAmelCase__ = mapped_key.replace('*' , snake_case__ )
if "weight_g" in name:
lowerCAmelCase__ = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase__ = 'weight_v'
elif "bias" in name:
lowerCAmelCase__ = 'bias'
elif "weight" in name:
lowerCAmelCase__ = 'weight'
else:
lowerCAmelCase__ = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(f'Unused weights: {unused_weights}' )
return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
"""simple docstring"""
lowerCAmelCase__ = full_name.split('conv_layers.' )[-1]
lowerCAmelCase__ = name.split('.' )
lowerCAmelCase__ = int(items[0] )
lowerCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
lowerCAmelCase__ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
lowerCAmelCase__ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
lowerCAmelCase__ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
lowerCAmelCase__ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(snake_case__ )
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers):
"""simple docstring"""
lowerCAmelCase__ = WavaVecaConfig.from_pretrained(snake_case__ )
lowerCAmelCase__ = SpeechaTextaConfig.from_pretrained(
snake_case__ , vocab_size=snake_case__ , decoder_layers=snake_case__ , do_stable_layer_norm=snake_case__ )
lowerCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowerCAmelCase__ = model[0].eval()
# set weights for wav2vec2 encoder
lowerCAmelCase__ = WavaVecaModel(snake_case__ )
lowerCAmelCase__ = recursively_load_weights_wavaveca(model.encoder , snake_case__ )
lowerCAmelCase__ = SpeechaTextaForCausalLM(snake_case__ )
lowerCAmelCase__ , lowerCAmelCase__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=snake_case__ )
# set output linear layer
unexpected_keys.remove('embed_out' )
lowerCAmelCase__ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
lowerCAmelCase__ = SpeechEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__ )
lowerCAmelCase__ = False
# add projection layer
lowerCAmelCase__ = nn.Parameter(projection_layer.weight )
lowerCAmelCase__ = nn.Parameter(projection_layer.bias )
lowerCAmelCase__ = create_vocab_dict(snake_case__ )
with open(os.path.join(snake_case__ , 'vocab.json' ) , 'w' ) as fp:
json.dump(snake_case__ , snake_case__ )
lowerCAmelCase__ = SpeechaTextaTokenizer(os.path.join(snake_case__ , 'vocab.json' ) )
tokenizer.save_pretrained(snake_case__ )
lowerCAmelCase__ = hf_wavavec.config.to_dict()
lowerCAmelCase__ = tokenizer.pad_token_id
lowerCAmelCase__ = tokenizer.bos_token_id
lowerCAmelCase__ = tokenizer.eos_token_id
lowerCAmelCase__ = 'speech_to_text_2'
lowerCAmelCase__ = 'wav2vec2'
lowerCAmelCase__ = SpeechEncoderDecoderConfig.from_dict(snake_case__ )
hf_wavavec.save_pretrained(snake_case__ )
feature_extractor.save_pretrained(snake_case__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0_2_2_4, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
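
# Illustrative usage sketch (not part of the original script; the script
# filename and all paths below are hypothetical):
#
#   python convert_wav2vec2_seq2seq_original_to_pytorch.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-2-speech2text2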
| 193
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
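
# Illustrative usage sketch (not part of the original module): attribute_map
# lets the generic config names resolve to the Swin-specific ones.
#
#   config = Swinv2Config(image_size=256, window_size=8)
#   assert config.num_hidden_layers == config.num_layers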
| 570
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Dict , a__ : int , a__ : Optional[Any]=3 , a__ : Tuple=32 , a__ : Optional[int]=3 , a__ : Optional[int]=10 , a__ : Optional[Any]=[8, 16, 32, 64] , a__ : List[str]=[1, 1, 2, 1] , a__ : Optional[Any]=True , a__ : Optional[Any]=True , a__ : Optional[int]="relu" , a__ : Any=3 , a__ : int=None , a__ : List[str]=["stage2", "stage3", "stage4"] , a__ : Optional[int]=[2, 3, 4] , a__ : Optional[Any]=1 , ):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = embeddings_size
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = len(a__ )
UpperCAmelCase = out_features
UpperCAmelCase = out_indices
UpperCAmelCase = num_groups
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def __snake_case ( self : Dict ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __snake_case ( self : Optional[Any] , a__ : str , a__ : Optional[int] , a__ : Union[str, Any] ):
UpperCAmelCase = BitModel(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __snake_case ( self : int , a__ : List[Any] , a__ : Optional[Any] , a__ : Tuple ):
UpperCAmelCase = self.num_labels
UpperCAmelCase = BitForImageClassification(a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : Dict , a__ : Optional[int] , a__ : str , a__ : int ):
UpperCAmelCase = BitBackbone(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase = None
UpperCAmelCase = BitBackbone(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =(BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_lowerCamelCase =(
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase =False
_lowerCamelCase =False
_lowerCamelCase =False
_lowerCamelCase =False
_lowerCamelCase =False
def __snake_case ( self : List[str] ):
UpperCAmelCase = BitModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=a__ , has_text_modality=a__ )
def __snake_case ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self : Optional[Any] ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __snake_case ( self : Optional[int] ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __snake_case ( self : List[str] ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __snake_case ( self : List[str] ):
pass
def __snake_case ( self : List[Any] ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def __snake_case ( self : Any ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def __snake_case ( self : int ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(config=a__ )
for name, module in model.named_modules():
if isinstance(a__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __snake_case ( self : int ):
def check_hidden_states_output(a__ : List[str] , a__ : Tuple , a__ : str ):
UpperCAmelCase = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(a__ , a__ ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase = layer_type
UpperCAmelCase = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(a__ , a__ , a__ )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __snake_case ( self : List[str] ):
pass
def __snake_case ( self : Dict ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def __snake_case ( self : int ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = BitModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def __snake_case ( ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self : Dict ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(a__ )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**a__ )
# verify the logits
UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
UpperCAmelCase = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =(BitBackbone,) if is_torch_available() else ()
_lowerCamelCase =BitConfig
_lowerCamelCase =False
def __snake_case ( self : Dict ):
UpperCAmelCase = BitModelTester(self )
| 570
| 1
|
'''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
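
# Illustrative example (not part of the original module): with RADIX = 10 each
# pass distributes the numbers by one decimal digit, so
#
#   radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#
# returns [2, 24, 45, 66, 75, 90, 170, 802].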
| 42
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
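# Added note: the try/except branching above only builds the import table;
# _LazyModule then replaces this module in sys.modules, so `ResNetModel` (for
# example) is only imported from modeling_resnet on first attribute access and
# `import transformers.models.resnet` stays cheap when torch is absent.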
| 42
| 1
|
"""simple docstring"""
def find_min(arr: list[int]) -> int:
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, s + 1):
        dp[0][i] = False  # no positive sum is reachable with zero items
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # either skip arr[i - 1] ...
            dp[i][j] = dp[i - 1][j]
            # ... or include it when it fits
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # the best split puts as close to half the total as possible on one side
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
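
# Added illustrative check: for [1, 6, 11, 5] (total 23), one side can reach 11,
# so the minimum partition difference is 23 - 2 * 11 = 1.
assert find_min([1, 6, 11, 5]) == 1
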
| 717
|
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # color every connected component, starting each unvisited vertex with color 0
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # the graph is bipartite iff no edge connects two same-colored vertices
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
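# Added illustrative check: a triangle contains an odd cycle, so it cannot be
# 2-colored and check_bipartite_dfs should return False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False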
| 533
| 0
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty list for both the story and the summary."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 149
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 503
| 0
|
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
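

# Added usage sketch (the checkpoint name is illustrative; `waveform` and
# `transcript` are hypothetical variables):
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   batch = processor(audio=waveform, sampling_rate=16000, text=transcript, return_tensors="pt")
# Audio goes through the feature extractor, text through the tokenizer, and the
# tokenized text comes back under `batch["labels"]`.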
| 702
|
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))
        self.assertIsInstance(agent_type.to_raw(), Image.Image)
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)
        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 476
| 0
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
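
# Added usage note (the file and checkpoint names are illustrative):
#   python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# Omitting --save_path overwrites the source checkpoint in place.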
| 76
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]
    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
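

# Added usage sketch: instantiating the config derives the backbone stage names.
#   config = ResNetConfig(depths=[2, 2], hidden_sizes=[64, 128], layer_type="basic")
#   config.stage_names  # ['stem', 'stage1', 'stage2']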
| 582
| 0
|
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
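
# Added usage sketch: these entry points back `torch.hub`, e.g. (model id is
# only an example):
#   import torch
#   model = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")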
| 712
|
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel


def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()


def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner


def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model


def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument(
        "--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name"
    )
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
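
# Typical launch (added; the script name and argument values are illustrative):
#   python run_clm_igf.py --data_dir ./data --model_name_or_path gpt2 --output_dir ./out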
| 355
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 90
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
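
# Added note: tests typically monkeypatch an example script's dataloader factory
# with `mocked_dataloaders` so the run uses the tiny MRPC fixtures above, e.g.
# (the module path is hypothetical):
#   with patch("examples.nlp_example.get_dataloaders", mocked_dataloaders):
#       nlp_example.main()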
| 90
| 1
|
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # either extend a selection that excluded the previous element, or keep
        # the best selection seen so far without the current element
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
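
# Added illustrative checks: for [1, 2, 3] the best non-adjacent picks are
# 1 + 3 = 4; for [4, 1, 1, 4] they are 4 + 4 = 8.
assert maximum_non_adjacent_sum([1, 2, 3]) == 4
assert maximum_non_adjacent_sum([4, 1, 1, 4]) == 8
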
if __name__ == "__main__":
import doctest
doctest.testmod()
| 612
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
| 612
| 1
|
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
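
# Added worked example: with pad_token_id=0 and decoder_start_token_id=0,
#   shift_tokens_right(jnp.array([[5, 6, -100]]), 0, 0)
# yields [[0, 5, 6]] — the sequence is shifted right, the start token is
# prepended, and the -100 label placeholder is replaced by the pad id.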
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
| 377
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowercase ( __snake_case , __snake_case , unittest.TestCase ):
UpperCamelCase = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowercase ( self : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = SwiftFormerModelTester(self )
UpperCAmelCase = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def _lowercase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _lowercase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
def _lowercase ( self : str ) -> str:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__lowerCamelCase )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__lowerCamelCase )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowercase ( self : List[str] ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowercase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = SwiftFormerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _lowercase ( self : Dict ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(__lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ):
UpperCAmelCase = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = 8
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__lowerCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowercase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
def _config_zero_init(__lowerCamelCase : Any ):
UpperCAmelCase = copy.deepcopy(__lowerCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__lowerCamelCase , __lowerCamelCase , 1e-1_0 )
if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ):
UpperCAmelCase = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return configs_no_init
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowercase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
def _UpperCamelCase ( ) ->Dict:
UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def _lowercase ( self : Optional[int] ) -> int:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _lowercase ( self : Optional[int] ) -> Any:
"""simple docstring"""
UpperCAmelCase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(__lowerCamelCase )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=__lowerCamelCase , return_tensors="""pt""" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**__lowerCamelCase )
# verify the logits
UpperCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
UpperCAmelCase = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 377
| 1
|
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ :
lowerCAmelCase_ = 42
# settable values
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
@classmethod
def UpperCAmelCase_ ( cls , A_ , A_ , A_ )-> Any:
'''simple docstring'''
return cls(common=A_ , init_noise_sigma=A_ , timesteps=A_ )
@dataclass
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = 42
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_):
lowerCAmelCase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCAmelCase_ = 42
@property
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
return True
@register_to_config
def __init__( self , A_ = 1000 , A_ = 0.0_001 , A_ = 0.02 , A_ = "linear" , A_ = None , A_ = "fixed_small" , A_ = True , A_ = "epsilon" , A_ = jnp.floataa , )-> List[str]:
'''simple docstring'''
UpperCamelCase = dtype
def UpperCAmelCase_ ( self , A_ = None )-> DDPMSchedulerState:
'''simple docstring'''
if common is None:
UpperCamelCase = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCamelCase = jnp.array(1.0 , dtype=self.dtype )
UpperCamelCase = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=A_ , init_noise_sigma=A_ , timesteps=A_ , )
def UpperCAmelCase_ ( self , A_ , A_ , A_ = None )-> jnp.ndarray:
'''simple docstring'''
return sample
def UpperCAmelCase_ ( self , A_ , A_ , A_ = () )-> DDPMSchedulerState:
'''simple docstring'''
UpperCamelCase = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCamelCase = (jnp.arange(0 , A_ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=A_ , timesteps=A_ , )
def UpperCAmelCase_ ( self , A_ , A_ , A_=None , A_=None )-> Dict:
'''simple docstring'''
UpperCamelCase = state.common.alphas_cumprod[t]
UpperCamelCase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCamelCase = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCamelCase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCamelCase = jnp.clip(A_ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCamelCase = jnp.log(jnp.clip(A_ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
UpperCamelCase = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCamelCase = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCamelCase = variance
UpperCamelCase = state.common.betas[t]
UpperCamelCase = (predicted_variance + 1) / 2
UpperCamelCase = frac * max_log + (1 - frac) * min_log
return variance
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ , A_ = None , A_ = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
'''simple docstring'''
UpperCamelCase = timestep
if key is None:
UpperCamelCase = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCamelCase , UpperCamelCase = jnp.split(A_ , sample.shape[1] , axis=1 )
else:
UpperCamelCase = None
# 1. compute alphas, betas
UpperCamelCase = state.common.alphas_cumprod[t]
UpperCamelCase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCamelCase = 1 - alpha_prod_t
UpperCamelCase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCamelCase = model_output
elif self.config.prediction_type == "v_prediction":
UpperCamelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
''' or `v_prediction` for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCamelCase = jnp.clip(A_ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCamelCase = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
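# Written out, the two coefficients above implement
# mu_t = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
#      + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t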
UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCamelCase = jax.random.split(A_ , num=1 )
UpperCamelCase = jax.random.normal(A_ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(A_ , A_ , predicted_variance=A_ ) ** 0.5) * noise
UpperCamelCase = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCamelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=A_ , state=A_ )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ , )-> jnp.ndarray:
'''simple docstring'''
return add_noise_common(state.common , A_ , A_ , A_ )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ , )-> jnp.ndarray:
'''simple docstring'''
return get_velocity_common(state.common , A_ , A_ , A_ )
def __len__( self )-> List[Any]:
'''simple docstring'''
return self.config.num_train_timesteps
| 432
|
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
lowerCAmelCase : Any = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCAmelCase : int = parser.parse_args()
if args.model_type == "roberta":
lowerCAmelCase : int = RobertaForMaskedLM.from_pretrained(args.model_name)
lowerCAmelCase : int = 'roberta'
elif args.model_type == "gpt2":
lowerCAmelCase : Optional[int] = GPTaLMHeadModel.from_pretrained(args.model_name)
lowerCAmelCase : Optional[int] = 'transformer'
lowerCAmelCase : str = model.state_dict()
lowerCAmelCase : List[str] = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
lowerCAmelCase : Any = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
lowerCAmelCase : Tuple = f"""{prefix}.embeddings.{w}.weight"""
lowerCAmelCase : str = state_dict[param_name]
for w in ["weight", "bias"]:
lowerCAmelCase : List[Any] = f"""{prefix}.embeddings.LayerNorm.{w}"""
lowerCAmelCase : str = state_dict[param_name]
# Transformer Blocks #
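# The counter below tracks the student layer currently being filled: teacher layers
# [0, 2, 4, 7, 9, 11] are copied into consecutive student layers 0..5.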
lowerCAmelCase : Any = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
lowerCAmelCase : int = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
lowerCAmelCase : Union[str, Any] = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
lowerCAmelCase : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
lowerCAmelCase : Any = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCAmelCase : List[str] = state_dict[f"""lm_head.dense.{w}"""]
lowerCAmelCase : Any = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
lowerCAmelCase : Dict = state_dict[f"""{prefix}.ln_f.{w}"""]
lowerCAmelCase : Tuple = state_dict['lm_head.weight']
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 432
| 1
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Union[str, Any] =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple ='''https://openaipublic.azureedge.net/jukebox/models/'''
__SCREAMING_SNAKE_CASE : Union[str, Any] ={
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def UpperCamelCase__ ( lowerCAmelCase__ ):
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
lowercase = key.replace(""".model.1.bias""" ,""".conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
lowercase = key.replace(""".model.1.weight""" ,""".conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
lowercase = key.replace(""".model.3.bias""" ,""".conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
lowercase = key.replace(""".model.3.weight""" ,""".conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
lowercase = key.replace("""conditioner_blocks.0""" ,"""conditioner_blocks""" )
if "prime_prior" in key:
lowercase = key.replace("""prime_prior""" ,"""encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowercase = key.replace(""".emb.""" ,""".""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" ,""".codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" ,"""metadata_embedding.""" )
if "x_emb.emb." in key:
lowercase = key.replace("""0.x_emb.emb""" ,"""embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" ,"""encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" ,""".layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" ,"""_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" ,"""encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" ,"""encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" ,"""fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" ,"""embed_tokens""" )
return key
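# Example renames produced by the rules above (derived from the rules, not exhaustive):
# "prime_state_ln.weight" -> "encoder.final_layer_norm.weight"
# "vqvae.bottleneck.level_blocks.0.k" -> "vqvae.bottleneck.level_blocks.0.codebook"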
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowercase = {}
import re
lowercase = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
lowercase = re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
lowercase = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
lowercase = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
lowercase = re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
lowercase = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
lowercase = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
lowercase = re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
lowercase = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(lowerCAmelCase__ ):
lowercase = re_encoder_block_conv_in.match(lowerCAmelCase__ )
lowercase = regex_match.groups()
lowercase = int(groups[2] ) * 2 + int(groups[3] )
lowercase = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
lowercase = re_encoder_block_conv_in.sub(lowerCAmelCase__ ,lowerCAmelCase__ )
elif re_encoder_block_resnet.fullmatch(lowerCAmelCase__ ):
lowercase = re_encoder_block_resnet.match(lowerCAmelCase__ )
lowercase = regex_match.groups()
lowercase = int(groups[2] ) * 2 + int(groups[3] )
lowercase = {"""1""": 1, """3""": 2}[groups[-2]]
lowercase = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
lowercase = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
lowercase = prefix + resnet_block
lowercase = re_encoder_block_resnet.sub(lowerCAmelCase__ ,lowerCAmelCase__ )
elif re_encoder_block_proj_out.fullmatch(lowerCAmelCase__ ):
lowercase = re_encoder_block_proj_out.match(lowerCAmelCase__ )
lowercase = regex_match.groups()
lowercase = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
lowercase = re_encoder_block_proj_out.sub(lowerCAmelCase__ ,lowerCAmelCase__ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(lowerCAmelCase__ ):
lowercase = re_decoder_block_conv_out.match(lowerCAmelCase__ )
lowercase = regex_match.groups()
lowercase = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowercase = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
lowercase = re_decoder_block_conv_out.sub(lowerCAmelCase__ ,lowerCAmelCase__ )
elif re_decoder_block_resnet.fullmatch(lowerCAmelCase__ ):
lowercase = re_decoder_block_resnet.match(lowerCAmelCase__ )
lowercase = regex_match.groups()
lowercase = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowercase = {"""1""": 1, """3""": 2}[groups[-2]]
lowercase = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
lowercase = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
lowercase = prefix + resnet_block
lowercase = re_decoder_block_resnet.sub(lowerCAmelCase__ ,lowerCAmelCase__ )
elif re_decoder_block_proj_in.fullmatch(lowerCAmelCase__ ):
lowercase = re_decoder_block_proj_in.match(lowerCAmelCase__ )
lowercase = regex_match.groups()
lowercase = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
lowercase = re_decoder_block_proj_in.sub(lowerCAmelCase__ ,lowerCAmelCase__ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(lowerCAmelCase__ ):
lowercase = re_prior_cond_conv_out.match(lowerCAmelCase__ )
lowercase = regex_match.groups()
lowercase = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowercase = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
lowercase = re_prior_cond_conv_out.sub(lowerCAmelCase__ ,lowerCAmelCase__ )
elif re_prior_cond_resnet.fullmatch(lowerCAmelCase__ ):
lowercase = re_prior_cond_resnet.match(lowerCAmelCase__ )
lowercase = regex_match.groups()
lowercase = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowercase = {"""1""": 1, """3""": 2}[groups[-2]]
lowercase = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
lowercase = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
lowercase = prefix + resnet_block
lowercase = re_prior_cond_resnet.sub(lowerCAmelCase__ ,lowerCAmelCase__ )
elif re_prior_cond_proj_in.fullmatch(lowerCAmelCase__ ):
lowercase = re_prior_cond_proj_in.match(lowerCAmelCase__ )
lowercase = regex_match.groups()
lowercase = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
lowercase = re_prior_cond_proj_in.sub(lowerCAmelCase__ ,lowerCAmelCase__ )
# keep original key
else:
lowercase = original_key
lowercase = replace_key(lowerCAmelCase__ )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle mismatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
lowercase = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
lowercase = original_key
lowercase = original_key
lowercase = value
return new_dict
@torch.no_grad()
def UpperCamelCase__ ( lowerCAmelCase__=None ,lowerCAmelCase__=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
lowercase = requests.get(f"""{PREFIX}{file}""" ,allow_redirects=lowerCAmelCase__ )
os.makedirs(f"""{pytorch_dump_folder_path}/""" ,exist_ok=lowerCAmelCase__ )
open(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ,"""wb""" ).write(r.content )
lowercase = MODEL_MAPPING[model_name.split("""/""" )[-1]]
lowercase = JukeboxConfig.from_pretrained(lowerCAmelCase__ )
lowercase = JukeboxModel(lowerCAmelCase__ )
lowercase = []
lowercase = {}
for i, dict_name in enumerate(lowerCAmelCase__ ):
lowercase = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["""model"""]
lowercase = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
lowercase = old_dic[k]
elif k.endswith(""".w""" ):
lowercase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowercase = old_dic[k]
else:
lowercase = old_dic[k]
lowercase = """vqvae""" if i == 0 else f"""priors.{3 - i}"""
lowercase = fix_jukebox_keys(lowerCAmelCase__ ,model.state_dict() ,lowerCAmelCase__ ,lowerCAmelCase__ )
weight_dict.append(lowerCAmelCase__ )
lowercase = weight_dict.pop(0 )
model.vqvae.load_state_dict(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" ,"""w""" ) as txtfile:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase__ )
return weight_dict
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
__SCREAMING_SNAKE_CASE : Dict =parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 428
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__SCREAMING_SNAKE_CASE : int =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[str] ={
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__SCREAMING_SNAKE_CASE : Optional[Any] ={
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__SCREAMING_SNAKE_CASE : str ={
'''facebook/blenderbot_small-90M''': 512,
}
class A_ ( __a ):
_A :List[str] = VOCAB_FILES_NAMES
_A :Dict = PRETRAINED_VOCAB_FILES_MAP
_A :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A :List[Any] = BlenderbotSmallTokenizer
def __init__( self : List[Any] , snake_case__ : Optional[Any]=None , snake_case__ : Any=None , snake_case__ : List[Any]="<|endoftext|>" , snake_case__ : Union[str, Any]="<|endoftext|>" , snake_case__ : Optional[Any]="<|endoftext|>" , snake_case__ : int=False , snake_case__ : List[str]=True , **snake_case__ : List[Any] , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=snake_case__ , merges=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , ) , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , **snake_case__ , )
lowercase = add_prefix_space
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : int , snake_case__ : List[Any]=None ):
lowercase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
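# i.e. a single sequence becomes `<bos> A <eos>` and a pair becomes `<bos> A <eos> <eos> B <eos>`.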
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 428
| 1
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
def __init__( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = False , _snake_case = False , _snake_case = None , _snake_case = None , **_snake_case , ):
"""simple docstring"""
super().__init__(
_snake_case , split=_snake_case , features=_snake_case , cache_dir=_snake_case , keep_in_memory=_snake_case , streaming=_snake_case , num_proc=_snake_case , **_snake_case , )
__lowerCamelCase = field
__lowerCamelCase = path_or_paths if isinstance(_snake_case , _snake_case ) else {self.split: path_or_paths}
__lowerCamelCase = Json(
cache_dir=_snake_case , data_files=_snake_case , features=_snake_case , field=_snake_case , **_snake_case , )
def _lowerCamelCase ( self ):
"""simple docstring"""
if self.streaming:
__lowerCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
self.builder.download_and_prepare(
download_config=_snake_case , download_mode=_snake_case , verification_mode=_snake_case , base_path=_snake_case , num_proc=self.num_proc , )
__lowerCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=_snake_case , in_memory=self.keep_in_memory )
return dataset
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _snake_case , _snake_case , _snake_case = None , _snake_case = None , **_snake_case , ):
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
__lowerCamelCase = dataset
__lowerCamelCase = path_or_buf
__lowerCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__lowerCamelCase = num_proc
__lowerCamelCase = '''utf-8'''
__lowerCamelCase = to_json_kwargs
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.to_json_kwargs.pop('''path_or_buf''' , _snake_case )
__lowerCamelCase = self.to_json_kwargs.pop('''orient''' , '''records''' )
__lowerCamelCase = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
__lowerCamelCase = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
__lowerCamelCase = self.to_json_kwargs.pop('''compression''' , _snake_case )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=_snake_case ) as buffer:
__lowerCamelCase = self._write(file_obj=_snake_case , orient=_snake_case , lines=_snake_case , index=_snake_case , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
''' was passed. Please provide a local path instead.''' )
__lowerCamelCase = self._write(
file_obj=self.path_or_buf , orient=_snake_case , lines=_snake_case , index=_snake_case , **self.to_json_kwargs )
return written
def _lowerCamelCase ( self , _snake_case ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = args
__lowerCamelCase = query_table(
table=self.dataset.data , key=slice(_snake_case , offset + self.batch_size ) , indices=self.dataset._indices , )
__lowerCamelCase = batch.to_pandas().to_json(
path_or_buf=_snake_case , orient=_snake_case , lines=_snake_case , index=_snake_case , **_snake_case )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
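# Each call serializes one slice of rows [offset, offset + batch_size) to JSON text
# (one record per line when lines=True), so batches can be written sequentially or
# handed to the multiprocessing pool below.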
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case , ):
"""simple docstring"""
__lowerCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
__lowerCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_snake_case )
else:
__lowerCamelCase , __lowerCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _snake_case , _snake_case )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(_snake_case )
return written
| 575
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase : Tuple ={
"configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[Any] =[
"GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"GraphormerForGraphClassification",
"GraphormerModel",
"GraphormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
_UpperCamelCase : int =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 575
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __a (a__):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Dict = ['''image_processor''', '''tokenizer''']
_SCREAMING_SNAKE_CASE :Dict = '''AutoImageProcessor'''
_SCREAMING_SNAKE_CASE :Union[str, Any] = '''AutoTokenizer'''
def __init__( self , _a , _a ) -> Optional[int]:
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor
def __call__( self , _a=None , _a=None , _a=None , **_a ) -> Optional[int]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if images is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE__ ) , tensor_type=SCREAMING_SNAKE_CASE__ )
def _a ( self , *_a , **_a ) -> str:
"""simple docstring"""
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _a ( self , *_a , **_a ) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def _a ( self ) -> Tuple:
"""simple docstring"""
return ["input_ids", "attention_mask", "pixel_values"]
| 680
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCAmelCase__ : Union[str, Any] = TypeVar('T')
UpperCAmelCase__ : List[Any] = TypeVar('U')
class lowerCAmelCase_ (Generic[T, U] ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = key
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val
SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] | None = None
SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] | None = None
def __repr__(self ) -> str:
"""simple docstring"""
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class lowerCAmelCase_ (Generic[T, U] ):
"""simple docstring"""
def __init__(self ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.rear, self.head
def __repr__(self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = ["""DoubleLinkedList"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.head
while node.next is not None:
rep.append(str(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
SCREAMING_SNAKE_CASE__ : int = node
SCREAMING_SNAKE_CASE__ : Optional[Any] = previous
SCREAMING_SNAKE_CASE__ : List[str] = node
SCREAMING_SNAKE_CASE__ : List[Any] = self.rear
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> DoubleLinkedListNode[T, U] | None:
"""simple docstring"""
if node.prev is None or node.next is None:
return None
SCREAMING_SNAKE_CASE__ : Optional[Any] = node.next
SCREAMING_SNAKE_CASE__ : Optional[int] = node.prev
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : List[str] = None
return node
class lowerCAmelCase_ (Generic[T, U] ):
"""simple docstring"""
__UpperCamelCase : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__(self , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : DoubleLinkedList[T, U] = DoubleLinkedList()
SCREAMING_SNAKE_CASE__ : List[Any] = capacity
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE__ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__(self ) -> str:
"""simple docstring"""
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__(self , SCREAMING_SNAKE_CASE__ ) -> bool:
"""simple docstring"""
return key in self.cache
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> U | None:
"""simple docstring"""
if key in self.cache:
self.hits += 1
SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] = self.cache[key]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(SCREAMING_SNAKE_CASE__ )
return node.val
self.miss += 1
return None
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
SCREAMING_SNAKE_CASE__ : Any = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(SCREAMING_SNAKE_CASE__ ) is not None
) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
SCREAMING_SNAKE_CASE__ : List[str] = DoubleLinkedListNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
SCREAMING_SNAKE_CASE__ : Optional[int] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
self.list.add(SCREAMING_SNAKE_CASE__ )
@classmethod
def __magic_name__ (cls , SCREAMING_SNAKE_CASE__ = 1_28 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
"""simple docstring"""
def cache_decorator_inner(SCREAMING_SNAKE_CASE__ ) -> Callable[..., U]:
def cache_decorator_wrapper(*SCREAMING_SNAKE_CASE__ ) -> U:
if func not in cls.decorator_function_to_instance_map:
SCREAMING_SNAKE_CASE__ : List[str] = LRUCache(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
SCREAMING_SNAKE_CASE__ : Tuple = func(*SCREAMING_SNAKE_CASE__ )
cls.decorator_function_to_instance_map[func].put(args[0] , SCREAMING_SNAKE_CASE__ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(SCREAMING_SNAKE_CASE__ , """cache_info""" , SCREAMING_SNAKE_CASE__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
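# Illustrative use of the decorator classmethod (class/function names are assumed, not from this file):
# @LRUCache.decorator(100)
# def fib(num: int) -> int:
#     return num if num < 2 else fib(num - 1) + fib(num - 2)
# fib.cache_info()  # -> CacheInfo(hits=..., misses=..., capacity=100, current size=...)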
if __name__ == "__main__":
import doctest
doctest.testmod()
| 223
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__snake_case = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 128
|
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__snake_case = trt.Logger(trt.Logger.WARNING)
__snake_case = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__snake_case = logging.getLogger(__name__)
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='Number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__snake_case = parser.parse_args()
if args.tokenizer_name:
__snake_case = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
' You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__snake_case = args.per_device_eval_batch_size
__snake_case = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__snake_case = True
__snake_case = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__snake_case = 'temp_engine/bert-fp16.engine'
if args.inta:
__snake_case = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__snake_case = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__snake_case = [network.get_input(i) for i in range(network.num_inputs)]
__snake_case = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__snake_case = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__snake_case = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__snake_case = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def _lowerCamelCase ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : str ):
lowercase__ : List[Any] = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
lowercase__ : Optional[Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
lowercase__ : Dict = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase__ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase__ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase__ )
# start time
lowercase__ : Optional[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowerCamelCase__ ) for d_inp in d_inputs] + [int(lowerCamelCase__ ), int(lowerCamelCase__ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
cuda.memcpy_dtoh_async(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowercase__ : str = time.time()
lowercase__ : Tuple = end_time - start_time
lowercase__ : List[Any] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__snake_case = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__snake_case = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__snake_case = raw_datasets['validation'].column_names
__snake_case = 'question' if 'question' in column_names else column_names[0]
__snake_case = 'context' if 'context' in column_names else column_names[1]
__snake_case = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__snake_case = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
__snake_case = min(args.max_seq_length, tokenizer.model_max_length)
def _lowerCamelCase ( lowerCamelCase__ : str ):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
lowercase__ : Dict = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit with the context of the previous feature.
lowercase__ : List[str] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase__ , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowercase__ : List[Any] = tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowercase__ : Tuple = []
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowercase__ : Union[str, Any] = tokenized_examples.sequence_ids(lowerCamelCase__ )
lowercase__ : List[str] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowercase__ : Optional[Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowercase__ : Dict = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
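# e.g. with max_seq_length=384 and doc_stride=128, a ~1000-token context is split into
# roughly four overlapping features, all mapped back to the same example id above.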
__snake_case = raw_datasets['validation']
# Validation Feature Creation
__snake_case = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__snake_case = default_data_collator
__snake_case = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__snake_case = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _lowerCamelCase ( lowerCamelCase__ : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
lowercase__ : List[Any] = postprocess_qa_predictions(
examples=lowerCamelCase__ , features=lowerCamelCase__ , predictions=lowerCamelCase__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase__ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowercase__ : str = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
lowercase__ : int = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
lowercase__ : str = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowerCamelCase__ , label_ids=lowerCamelCase__ )
__snake_case = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _lowerCamelCase ( lowerCamelCase__ : Tuple ):
return trt.volume(engine.get_binding_shape(lowerCamelCase__ ) ) * engine.get_binding_dtype(lowerCamelCase__ ).itemsize
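# e.g. an int32 input binding shaped (8, 384) occupies 8 * 384 * 4 = 12288 bytes.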
# Allocate device memory for inputs and outputs.
__snake_case = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__snake_case = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__snake_case = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__snake_case = cuda.mem_alloc(h_outputa.nbytes)
__snake_case = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__snake_case = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(F" Num examples = {len(eval_dataset)}")
logger.info(F" Batch size = {args.per_device_eval_batch_size}")
__snake_case = 0.0
__snake_case = 0
__snake_case = timeit.default_timer()
__snake_case = None
for step, batch in enumerate(eval_dataloader):
__snake_case , __snake_case = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__snake_case , __snake_case = outputs
__snake_case = torch.tensor(start_logits)
__snake_case = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__snake_case = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__snake_case = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__snake_case = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__snake_case = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__snake_case = nested_truncate(all_preds, len(eval_dataset))
__snake_case = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
logger.info('Total Number of Inference = %d', niter)
__snake_case = post_processing_function(eval_examples, eval_dataset, all_preds)
__snake_case = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"Evaluation metrics: {eval_metric}")
| 128
| 1
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """Construct a Speech2Text tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>",
        do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang,
            lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language code of the target language."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
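# Quick round-trip check for the two JSON helpers above ("demo_vocab.json" is
# just an illustrative throwaway path):
if __name__ == "__main__":
    save_json({"<pad>": 0, "<s>": 1}, "demo_vocab.json")
    assert load_json("demo_vocab.json") == {"<pad>": 0, "<s>": 1}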
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-renames a single (key, tensor) pair to match the PyTorch naming."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
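# Worked example for rename_base_flax_keys (illustrative key and shapes): a 3D
# expert kernel is renamed to "weight" and permuted from (experts, d_in, d_out)
# to (experts, d_out, d_in):
# >>> key, tensor = rename_base_flax_keys(("mlp", "wi", "kernel"), torch.zeros(8, 512, 1024))
# >>> key, tuple(tensor.shape)
# (('mlp', 'wi', 'weight'), (8, 1024, 512))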
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
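# For reference, the index written above follows the standard sharded
# checkpoint layout; the names and sizes below are illustrative only:
# {
#     "metadata": {"total_size": 9882535936},
#     "weight_map": {
#         "shared.weight": "pytorch_model-00001-of-00013.bin",
#         "encoder.block.0.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00013.bin",
#         ...
#     },
# }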
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
__UpperCAmelCase = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the _import_structure objects defined and the TYPE_CHECKING objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
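# Illustration with hypothetical inputs: an object listed under TYPE_CHECKING
# for a backend but missing from _import_structure is reported per backend:
# >>> analyze_results({"torch": ["BertModel"]}, {"torch": ["BertModel", "GPT2Model"]})
# ['Differences for torch backend:', '  GPT2Model in TYPE_HINT but not in _import_structure.']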
def check_all_inits():
    """Check all inits in the repo and raise an error if at least one does not define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def check_submodules():
    """Check that every submodule of Transformers is registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
        import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
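# Example: get_pairs over the symbol tuple for "low" with the end-of-word
# marker used by bpe() below (set ordering may differ):
# >>> get_pairs(("l", "o", "w</w>"))
# {('l', 'o'), ('o', 'w</w>')}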
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__",
        pad_token="__null__", **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into tokens using BPE."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens in a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(height):
        for i in range(width):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
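# A tiny self-check (assumes Pillow is installed): with pixel values
# [10, 20, 30, 200] the mean is 65, so only the last pixel ends up white.
if __name__ == "__main__":
    demo = Image.new("L", (2, 2))
    demo.putdata([10, 20, 30, 200])
    assert list(mean_threshold(demo).getdata()) == [0, 0, 0, 255]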
'''simple docstring'''
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the value of d < digit for which 1/d contains the longest recurring
    cycle in its decimal fraction part.
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
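# Example (from the Project Euler 26 statement): among d < 10, d = 7 produces
# the longest recurring cycle in the decimal expansion of 1/d:
# >>> solution(1, 10)
# 7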
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """
    Factory function used to instantiate training command from provided command line arguments.

    Returns: TrainCommand
    """
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.

        Args:
            parser: Root parser to register command-specific arguments
        """
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
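# Hypothetical programmatic invocation (argument names mirror
# register_subcommand above; "train.csv" is a placeholder path):
# from argparse import Namespace
# args = Namespace(
#     train_data="train.csv", column_label=0, column_text=1, column_id=2,
#     skip_first_row=True, validation_data="", validation_split=0.1,
#     output="./trained", task="text_classification", model="bert-base-uncased",
#     train_batch_size=32, valid_batch_size=64, learning_rate=3e-5, adam_epsilon=1e-08,
# )
# TrainCommand(args).run()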
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of .xz (LZMA) file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of .zstd file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
def __init__( self , snake_case , snake_case = "rb" , snake_case = None , snake_case = None , snake_case = DEFAULT_BLOCK_SIZE , **snake_case , ) -> List[Any]:
super().__init__(
fo=snake_case , mode=snake_case , target_protocol=snake_case , target_options=snake_case , block_size=snake_case , **snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
_UpperCAmelCase = self.file.__enter__
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case ) -> Optional[Any]:
_UpperCAmelCase = file_
def __enter__( self ) -> str:
self._file.__enter__()
return self
def __exit__( self , *snake_case , **snake_case ) -> str:
self._file.__exit__(*snake_case , **snake_case )
def __iter__( self ) -> Union[str, Any]:
return iter(self._file )
def lowerCamelCase_ ( self ) -> int:
return next(self._file )
def __getattr__( self , snake_case ) -> int:
return getattr(self._file , snake_case )
def fixed_enter(*snake_case , **snake_case ):
return WrappedFile(_enter(*snake_case , **snake_case ) )
_UpperCAmelCase = fixed_enter
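# Minimal usage sketch (assumes fsspec is installed; "demo.txt.gz" is a
# throwaway local file): fsspec can route reads through compression handling
# such as the filesystems above, here via extension inference.
if __name__ == "__main__":
    import gzip

    with gzip.open("demo.txt.gz", "wb") as f:
        f.write(b"hello")
    with fsspec.open("demo.txt.gz", "rb", compression="infer") as f:
        assert f.read() == b"hello"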
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """
    Choose a random pivot from the list.
    """
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the k-th smallest element of the list (1-indexed).
    """
    # pick a pivot and separate into lists based on the pivot
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
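# Usage example: the third-smallest of distinct values (the partition drops
# values equal to the pivot, so inputs are assumed distinct):
# >>> kth_number([2, 1, 3, 4, 5], 3)
# 3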
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A__ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
A__ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 
643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __a )
self.assertListEqual(encoding.boxes , __a )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
"""simple docstring"""
from PIL import Image
def __lowerCamelCase ( lowerCAmelCase__ ):
A__ , A__ = image.size
A__ = 0
A__ = image.load()
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
A__ = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
A__ = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Dict = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''')
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
def snake_case_ ( self):
# Initialize image_processor
lowercase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowercase__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a)
for image in image_inputs:
self.assertIsInstance(a , Image.Image)
# Test not batched input
lowercase__ : Any = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
lowercase__ : int = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(a):
lowercase__ : int = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a).flattened_patches
lowercase__ : Tuple = 'Hello'
lowercase__ : Tuple = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a , header_text=a).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowercase__ : Optional[Any] = image_processor(
a , return_tensors='pt' , max_patches=a , header_text=a).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def snake_case_ ( self):
# Initialize image_processor
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowercase__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a)
for image in image_inputs:
self.assertIsInstance(a , np.ndarray)
lowercase__ : List[Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowercase__ : str = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowercase__ : Any = image_processor(
a , return_tensors='pt' , max_patches=a).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def snake_case_ ( self):
# Initialize image_processor
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowercase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a)
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor)
# Test not batched input
lowercase__ : int = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowercase__ : int = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowercase__ : Optional[Any] = image_processor(
a , return_tensors='pt' , max_patches=a).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: converting RGBA inputs to RGB drops the alpha
        # channel, hence (num_channels - 1).
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
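# Why expected_hidden_dim = patch_h * patch_w * channels + 2 in the tests above:
# each flattened patch carries its raw pixel values plus two extra slots for the
# patch's (row, column) position. Under the tester defaults (16x16 patches,
# 3 channels) that is 16 * 16 * 3 + 2 = 770.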
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(list(MODEL_FOR_OBJECT_DETECTION_MAPPING.items()) + list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score": x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        # Turns an [xmin, ymin, xmax, ymax] tensor into a usable dictionary.
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
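# A minimal usage sketch (the checkpoint is resolved by the `pipeline` factory;
# the image URL and threshold are illustrative):
#
#     from transformers import pipeline
#
#     detector = pipeline("object-detection")
#     detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#     # -> [{"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]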
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Find the maximum of nums[left]..nums[right] by divide and conquer.

    >>> find_max([3, 2, 1], 0, 2)
    3
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
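
# find_max visits each element exactly once, so it runs in O(n):
# the recursion satisfies T(n) = 2 * T(n / 2) + O(1).
# Example: find_max([1, 5, 2, 4], 0, 3) returns 5.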
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
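# `_LazyModule` substitutes itself for this module in `sys.modules` and only
# performs the heavy torch/TF imports listed in `_import_structure` when the
# corresponding attribute is first accessed, keeping `import transformers` cheap.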
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by the given level (-255..255)."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
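# For level = 170 the contrast factor works out to
# 259 * (170 + 255) / (255 * (259 - 170)) = 110075 / 22695 ≈ 4.85, so each
# channel value c is mapped to int(128 + 4.85 * (c - 128)), pushing values
# away from mid-gray (128). `Image.point` applies this per channel via a
# 256-entry lookup table.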
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
from ..utils import DummyObject, requires_backends
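# Each placeholder class below follows the `DummyObject` pattern: it stands in
# for a real torch-backed class when PyTorch is not installed, and any attempt
# to instantiate it (or to call its `from_config`/`from_pretrained` classmethods)
# raises an informative ImportError via `requires_backends`.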
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : List[Any] , *snake_case_ : Optional[int] , **snake_case_ : str)-> Optional[int]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Dict , *snake_case_ : Union[str, Any] , **snake_case_ : int)-> Any:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[Any] , *snake_case_ : Optional[Any] , **snake_case_ : Any)-> int:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : List[Any] , *snake_case_ : List[str] , **snake_case_ : str)-> Union[str, Any]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Any , *snake_case_ : Any , **snake_case_ : Union[str, Any])-> int:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Any , *snake_case_ : str , **snake_case_ : Union[str, Any])-> List[str]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : int , *snake_case_ : Any , **snake_case_ : Optional[int])-> int:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[int] , *snake_case_ : Optional[Any] , **snake_case_ : Optional[int])-> List[str]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[int] , *snake_case_ : Optional[Any] , **snake_case_ : Optional[int])-> Optional[Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Any , *snake_case_ : str , **snake_case_ : str)-> str:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Tuple , *snake_case_ : Any , **snake_case_ : Tuple)-> Tuple:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Dict , *snake_case_ : int , **snake_case_ : Any)-> Any:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Optional[int] , *snake_case_ : int , **snake_case_ : str)-> Optional[Any]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Tuple , *snake_case_ : List[Any] , **snake_case_ : Tuple)-> Optional[int]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Tuple , *snake_case_ : Optional[int] , **snake_case_ : str)-> Optional[Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Optional[Any] , *snake_case_ : str , **snake_case_ : Dict)-> Optional[int]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Dict , *snake_case_ : Tuple , **snake_case_ : Union[str, Any])-> Any:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Dict , *snake_case_ : List[Any] , **snake_case_ : Union[str, Any])-> Optional[int]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : str , *snake_case_ : Any , **snake_case_ : Optional[Any])-> Dict:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[str] , *snake_case_ : Union[str, Any] , **snake_case_ : Optional[Any])-> str:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[Any] , *snake_case_ : List[str] , **snake_case_ : List[Any])-> Union[str, Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : List[str] , *snake_case_ : List[str] , **snake_case_ : List[Any])-> Tuple:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Any , *snake_case_ : List[Any] , **snake_case_ : Tuple)-> str:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : str , *snake_case_ : Tuple , **snake_case_ : Union[str, Any])-> List[Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : List[Any] , *snake_case_ : List[str] , **snake_case_ : str)-> Dict:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Dict , *snake_case_ : List[Any] , **snake_case_ : Any)-> int:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Union[str, Any] , *snake_case_ : str , **snake_case_ : Dict)-> Optional[int]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : List[str] , *snake_case_ : List[str] , **snake_case_ : Any)-> Dict:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[Any] , *snake_case_ : List[Any] , **snake_case_ : Dict)-> int:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[str] , *snake_case_ : str , **snake_case_ : Any)-> Union[str, Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Tuple , *snake_case_ : Optional[int] , **snake_case_ : Union[str, Any])-> Any:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Tuple , *snake_case_ : Tuple , **snake_case_ : Tuple)-> List[Any]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Any , *snake_case_ : List[str] , **snake_case_ : Any)-> Optional[int]:
requires_backends(cls , ["""torch"""])
def __lowerCAmelCase ( *__lowerCamelCase : Any , **__lowerCamelCase : Any ) -> Optional[Any]:
requires_backends(__lowerCamelCase , ["""torch"""] )
def __lowerCAmelCase ( *__lowerCamelCase : Dict , **__lowerCamelCase : Tuple ) -> Optional[Any]:
requires_backends(__lowerCamelCase , ["""torch"""] )
def __lowerCAmelCase ( *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Optional[int] ) -> Union[str, Any]:
requires_backends(__lowerCamelCase , ["""torch"""] )
def __lowerCAmelCase ( *__lowerCamelCase : Optional[int] , **__lowerCamelCase : List[str] ) -> List[Any]:
requires_backends(__lowerCamelCase , ["""torch"""] )
def __lowerCAmelCase ( *__lowerCamelCase : List[str] , **__lowerCamelCase : int ) -> List[Any]:
requires_backends(__lowerCamelCase , ["""torch"""] )
def __lowerCAmelCase ( *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[str] ) -> Dict:
requires_backends(__lowerCamelCase , ["""torch"""] )
def __lowerCAmelCase ( *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
requires_backends(__lowerCamelCase , ["""torch"""] )
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Union[str, Any] , *snake_case_ : List[Any] , **snake_case_ : int)-> str:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : int , *snake_case_ : Dict , **snake_case_ : Union[str, Any])-> Optional[int]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : int , *snake_case_ : Dict , **snake_case_ : List[str])-> Union[str, Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : str , *snake_case_ : Optional[Any] , **snake_case_ : Dict)-> Union[str, Any]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[Any] , *snake_case_ : Optional[int] , **snake_case_ : int)-> List[str]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[Any] , *snake_case_ : Union[str, Any] , **snake_case_ : List[str])-> Tuple:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Union[str, Any] , *snake_case_ : Union[str, Any] , **snake_case_ : Dict)-> Optional[int]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : str , *snake_case_ : Optional[int] , **snake_case_ : Optional[int])-> List[Any]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Tuple , *snake_case_ : Any , **snake_case_ : Dict)-> Union[str, Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : int , *snake_case_ : Any , **snake_case_ : Optional[int])-> Tuple:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : int , *snake_case_ : int , **snake_case_ : Union[str, Any])-> Union[str, Any]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Union[str, Any] , *snake_case_ : Union[str, Any] , **snake_case_ : List[str])-> Optional[int]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : int , *snake_case_ : int , **snake_case_ : List[Any])-> List[Any]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[int] , *snake_case_ : Any , **snake_case_ : int)-> Union[str, Any]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Any , *snake_case_ : int , **snake_case_ : int)-> Any:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Any , *snake_case_ : Optional[Any] , **snake_case_ : Any)-> Dict:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[Any] , *snake_case_ : List[Any] , **snake_case_ : List[Any])-> Optional[int]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[Any] , *snake_case_ : Optional[int] , **snake_case_ : Optional[int])-> int:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Dict , *snake_case_ : Optional[int] , **snake_case_ : Optional[Any])-> List[str]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Tuple , *snake_case_ : str , **snake_case_ : List[str])-> int:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[int] , *snake_case_ : int , **snake_case_ : Any)-> List[Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : List[Any] , *snake_case_ : Optional[Any] , **snake_case_ : List[str])-> List[Any]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : str , *snake_case_ : List[str] , **snake_case_ : Union[str, Any])-> Optional[int]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[Any] , *snake_case_ : int , **snake_case_ : List[Any])-> str:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : int , *snake_case_ : Tuple , **snake_case_ : Any)-> Any:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[Any] , *snake_case_ : List[Any] , **snake_case_ : Union[str, Any])-> str:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[Any] , *snake_case_ : Dict , **snake_case_ : Dict)-> Union[str, Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Optional[Any] , *snake_case_ : List[str] , **snake_case_ : str)-> List[Any]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : str , *snake_case_ : Optional[Any] , **snake_case_ : Union[str, Any])-> str:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : str , *snake_case_ : Dict , **snake_case_ : Union[str, Any])-> Optional[int]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Dict , *snake_case_ : Optional[Any] , **snake_case_ : List[str])-> int:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[Any] , *snake_case_ : int , **snake_case_ : str)-> Optional[int]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Tuple , *snake_case_ : Tuple , **snake_case_ : Any)-> Optional[Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Any , *snake_case_ : str , **snake_case_ : Optional[Any])-> Any:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[Any] , *snake_case_ : Union[str, Any] , **snake_case_ : Tuple)-> Any:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : int , *snake_case_ : Tuple , **snake_case_ : Union[str, Any])-> str:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : str , *snake_case_ : Any , **snake_case_ : Tuple)-> Tuple:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[Any] , *snake_case_ : List[str] , **snake_case_ : Any)-> Dict:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[Any] , *snake_case_ : Optional[Any] , **snake_case_ : Union[str, Any])-> List[str]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Union[str, Any] , *snake_case_ : int , **snake_case_ : Any)-> Any:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Any , *snake_case_ : Dict , **snake_case_ : int)-> Optional[Any]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[int] , *snake_case_ : Optional[Any] , **snake_case_ : Tuple)-> int:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : str , *snake_case_ : List[Any] , **snake_case_ : List[Any])-> Dict:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Dict , *snake_case_ : Tuple , **snake_case_ : Optional[int])-> int:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Union[str, Any] , *snake_case_ : Optional[Any] , **snake_case_ : Dict)-> Optional[int]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : str , *snake_case_ : Tuple , **snake_case_ : Union[str, Any])-> Any:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Any , *snake_case_ : List[Any] , **snake_case_ : Optional[int])-> Optional[int]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[str] , *snake_case_ : Union[str, Any] , **snake_case_ : str)-> Any:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Any , *snake_case_ : int , **snake_case_ : Optional[Any])-> List[str]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[str] , *snake_case_ : Optional[Any] , **snake_case_ : str)-> str:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Any , *snake_case_ : List[str] , **snake_case_ : Optional[Any])-> int:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Any , *snake_case_ : Optional[Any] , **snake_case_ : Dict)-> Optional[Any]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[Any] , *snake_case_ : List[Any] , **snake_case_ : List[str])-> Any:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : str , *snake_case_ : int , **snake_case_ : Optional[Any])-> Optional[Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Dict , *snake_case_ : Union[str, Any] , **snake_case_ : Dict)-> Optional[Any]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[int] , *snake_case_ : Union[str, Any] , **snake_case_ : Tuple)-> List[str]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Union[str, Any] , *snake_case_ : Tuple , **snake_case_ : Optional[int])-> str:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Optional[int] , *snake_case_ : List[Any] , **snake_case_ : Any)-> List[Any]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Any , *snake_case_ : Tuple , **snake_case_ : int)-> Optional[int]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Dict , *snake_case_ : Optional[int] , **snake_case_ : Optional[Any])-> List[Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Optional[int] , *snake_case_ : Dict , **snake_case_ : int)-> Optional[Any]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[int] , *snake_case_ : Tuple , **snake_case_ : List[Any])-> str:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : str , *snake_case_ : Any , **snake_case_ : Tuple)-> Optional[int]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : List[Any] , *snake_case_ : Optional[int] , **snake_case_ : Dict)-> int:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[int] , *snake_case_ : str , **snake_case_ : int)-> int:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : int , *snake_case_ : Tuple , **snake_case_ : Tuple)-> Any:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : str , *snake_case_ : Union[str, Any] , **snake_case_ : Optional[int])-> Tuple:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[Any] , *snake_case_ : Any , **snake_case_ : List[str])-> int:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[Any] , *snake_case_ : Optional[int] , **snake_case_ : Dict)-> Optional[int]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : str , *snake_case_ : Tuple , **snake_case_ : str)-> Dict:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : int , *snake_case_ : Union[str, Any] , **snake_case_ : Dict)-> List[Any]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[str] , *snake_case_ : Union[str, Any] , **snake_case_ : int)-> int:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : int , *snake_case_ : List[Any] , **snake_case_ : int)-> Union[str, Any]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : str , *snake_case_ : List[Any] , **snake_case_ : str)-> int:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[Any] , *snake_case_ : List[Any] , **snake_case_ : Any)-> Union[str, Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Union[str, Any] , *snake_case_ : Union[str, Any] , **snake_case_ : Optional[int])-> Any:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[int] , *snake_case_ : Optional[int] , **snake_case_ : Dict)-> List[str]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Tuple , *snake_case_ : Optional[Any] , **snake_case_ : Any)-> Any:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : str , *snake_case_ : Optional[Any] , **snake_case_ : Dict)-> Optional[int]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Union[str, Any] , *snake_case_ : Tuple , **snake_case_ : Optional[Any])-> Optional[Any]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[int] , *snake_case_ : Union[str, Any] , **snake_case_ : Union[str, Any])-> str:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : int , *snake_case_ : int , **snake_case_ : Dict)-> Tuple:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Tuple , *snake_case_ : Union[str, Any] , **snake_case_ : Dict)-> Optional[Any]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Union[str, Any] , *snake_case_ : int , **snake_case_ : Union[str, Any])-> Dict:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Union[str, Any] , *snake_case_ : Optional[Any] , **snake_case_ : List[Any])-> Any:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : str , *snake_case_ : Optional[Any] , **snake_case_ : str)-> Union[str, Any]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : str , *snake_case_ : Any , **snake_case_ : Any)-> Optional[Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Tuple , *snake_case_ : Dict , **snake_case_ : Optional[Any])-> Dict:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Any , *snake_case_ : Dict , **snake_case_ : List[str])-> Tuple:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[Any] , *snake_case_ : Optional[int] , **snake_case_ : Tuple)-> Dict:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : List[Any] , *snake_case_ : Optional[Any] , **snake_case_ : List[Any])-> Optional[int]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Dict , *snake_case_ : str , **snake_case_ : Dict)-> Any:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[str] , *snake_case_ : Optional[int] , **snake_case_ : Union[str, Any])-> Dict:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : List[str] , *snake_case_ : str , **snake_case_ : List[Any])-> Union[str, Any]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Any , *snake_case_ : Optional[int] , **snake_case_ : Tuple)-> List[str]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[str] , *snake_case_ : List[str] , **snake_case_ : int)-> List[Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Any , *snake_case_ : Optional[Any] , **snake_case_ : List[Any])-> Dict:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Dict , *snake_case_ : List[Any] , **snake_case_ : List[str])-> str:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Tuple , *snake_case_ : Union[str, Any] , **snake_case_ : Optional[int])-> Tuple:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : List[Any] , *snake_case_ : str , **snake_case_ : int)-> str:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Dict , *snake_case_ : str , **snake_case_ : Dict)-> int:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : str , *snake_case_ : Tuple , **snake_case_ : Optional[Any])-> Optional[int]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Any , *snake_case_ : Tuple , **snake_case_ : Tuple)-> Any:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Dict , *snake_case_ : Optional[Any] , **snake_case_ : List[Any])-> int:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[Any] , *snake_case_ : Tuple , **snake_case_ : Union[str, Any])-> Optional[Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : int , *snake_case_ : List[str] , **snake_case_ : int)-> Dict:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : str , *snake_case_ : Union[str, Any] , **snake_case_ : Optional[Any])-> Optional[Any]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Optional[int] , *snake_case_ : str , **snake_case_ : Tuple)-> List[Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : List[Any] , *snake_case_ : Optional[int] , **snake_case_ : Dict)-> str:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Dict , *snake_case_ : List[Any] , **snake_case_ : str)-> List[Any]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[str] , *snake_case_ : List[Any] , **snake_case_ : str)-> List[str]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Union[str, Any] , *snake_case_ : Tuple , **snake_case_ : str)-> Union[str, Any]:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : List[Any] , *snake_case_ : Any , **snake_case_ : Dict)-> Union[str, Any]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Tuple , *snake_case_ : List[Any] , **snake_case_ : Tuple)-> Union[str, Any]:
requires_backends(cls , ["""torch"""])
class __a ( metaclass=SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = ["torch"]
def __init__( self : Dict , *snake_case_ : Union[str, Any] , **snake_case_ : Tuple)-> Any:
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : int , *snake_case_ : Any , **snake_case_ : Tuple)-> Union[str, Any]:
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase ( cls : Dict , *snake_case_ : Union[str, Any] , **snake_case_ : str)-> List[Any]:
requires_backends(cls , ["""torch"""])
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
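# Sanity check: hamming(10) yields the first ten 5-smooth numbers,
# [1, 2, 3, 4, 5, 6, 8, 9, 10, 12].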
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
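# A minimal usage sketch (assumes `args` is a populated SquadDataTrainingArguments
# pointing at a local SQuAD-style data_dir; the checkpoint name is illustrative):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#     inputs = train_dataset[0]  # dict of tensors ready for a QA model forward pass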
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
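# Sketch of the round trip this tokenizer implements (the vocab path is
# illustrative; any SentencePiece model file works):
#
#     tok = ReformerTokenizer("spiece.model")
#     tokens = tok._tokenize("Hello world")            # "▁Hello"-style pieces
#     ids = [tok._convert_token_to_id(t) for t in tokens]
#     assert tok.convert_tokens_to_string(tokens) == "Hello world"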
'''simple docstring'''
from __future__ import annotations
from typing import Any
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0 ) -> None:
A_ , A_ : int = row, column
A_ : Optional[Any] = [[default_value for c in range(_lowerCamelCase )] for r in range(_lowerCamelCase )]
def __str__( self ) -> str:
A_ : int = F"Matrix consist of {self.row} rows and {self.column} columns\n"
# Make string identifier
A_ : Optional[Any] = 0
for row_vector in self.array:
for obj in row_vector:
A_ : List[Any] = max(_lowerCamelCase , len(str(_lowerCamelCase ) ) )
A_ : List[str] = F"%{max_element_length}s"
# Make string and return
def single_line(_lowerCamelCase ) -> str:
nonlocal string_format_identifier
A_ : Tuple = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(_lowerCamelCase ) for row_vector in self.array )
return s
def __repr__( self ) -> str:
return str(self )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> bool:
if not (isinstance(_lowerCamelCase , (list, tuple) ) and len(_lowerCamelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , _lowerCamelCase ) -> Any:
assert self.validate_indicies(_lowerCamelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self , _lowerCamelCase , _lowerCamelCase ) -> None:
assert self.validate_indicies(_lowerCamelCase )
A_ : Union[str, Any] = value
def __add__( self , _lowerCamelCase ) -> Matrix:
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert self.row == another.row and self.column == another.column
# Add
A_ : str = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
A_ : Dict = self[r, c] + another[r, c]
return result
def __neg__( self ) -> Matrix:
A_ : Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
A_ : Optional[Any] = -self[r, c]
return result
def __sub__( self , _lowerCamelCase ) -> Matrix:
return self + (-another)
def __mul__( self , _lowerCamelCase ) -> Matrix:
if isinstance(_lowerCamelCase , (int, float) ): # Scalar multiplication
A_ : List[str] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
A_ : Any = self[r, c] * another
return result
elif isinstance(_lowerCamelCase , _lowerCamelCase ): # Matrix multiplication
assert self.column == another.row
A_ : Union[str, Any] = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
A_ : Optional[Any] = F"Unsupported type given for another ({type(_lowerCamelCase )})"
raise TypeError(_lowerCamelCase )
    def transpose( self ) -> Matrix:
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison( self , u , v ) -> Any:
        # Sherman-Morrison: (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
        # where `self` is assumed to already hold A^(-1).
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """simple docstring"""
        # a^(-1) (here a is the identity matrix, so a^(-1) = a)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(F"a^(-1) is {ainv}" )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F"u is {u}" )
        print(F"v is {v}" )
        print(F"uv^T is {u * v.transpose()}" )
        # Sherman Morrison
        print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}" )

    def test2() -> None:
        """simple docstring"""
        import doctest

        doctest.testmod()

    test2()
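    # Hand-checked sanity sketch for test1 above: with a = I (so a^(-1) = I),
    # u = (1, 2, -3)^T and v = (4, -2, 5)^T, the Sherman-Morrison denominator is
    # 1 + v^T a^(-1) u = 1 + (1*4 + 2*(-2) + (-3)*5) = 1 - 15 = -14, which is
    # nonzero, so sherman_morrison(u, v) returns a matrix rather than None.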
| 385
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device('cpu')
def prepare_img() -> Image.Image:
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output(swiftformer_name ) -> torch.Tensor:
    """simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703E00, 2.1107E00, -2.0811E00, 8.8685E-01, 2.4360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636E-01, 2.3478E-01, -1.6963E00, -1.7381E00, -8.6337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768E-01, -4.7429E-01, -1.0897E00, -1.0248E00, 3.5523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330E-01, 2.4211E-01, -6.0185E-01, -8.2789E-01, -6.0446E-02] )
def rename_key(dct , old , new ) -> None:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict ) -> list:
    """simple docstring"""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv" , ".point_wise_conv" )
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv" , ".depth_wise_conv" )
        if ".Proj." in k:
            k_new = k_new.replace(".Proj." , ".proj." )
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
        if "network" in k_new:
            ls = k_new.split("." )
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
            else:
                k_new = k_new.replace("network" , "swiftformer.encoder.network" )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name , pytorch_dump_folder_path , original_ckpt ) -> None:
    """simple docstring"""
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https" ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location="cpu" , check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt , map_location="cpu" )
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config" )
    inputs = processor(images=image , return_tensors="pt" )

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs["pixel_values"] ).logits

    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1e-3 )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
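# Example invocation (the script file name and checkpoint path are illustrative,
# only the flags come from the argparse definition above):
# python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
#     --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt ./swiftformer_xs.pth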
| 385
| 1
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase , ToolTesterMixin ):
    def setup(self ):
        self.tool = load_tool("text-classification" )
        self.tool.setup()
        self.remote_tool = load_tool("text-classification" , remote=True )

    def test_exact_match_arg(self ):
        result = self.tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )

    def test_exact_match_arg_remote(self ):
        result = self.remote_tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )

    def test_exact_match_kwarg(self ):
        result = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" )

    def test_exact_match_kwarg_remote(self ):
        result = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" )
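# Standalone usage sketch mirroring the tests above (downloads a model on first run):
# classifier = load_tool("text-classification")
# classifier.setup()
# classifier("That's quite cool", ["positive", "negative"])  # -> "positive"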
| 10
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"unc-nlp/lxmert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
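    # Shape sketch for the pair methods above (token ids are made up): for
    # token_ids_0 = [5, 6] and token_ids_1 = [7], build_inputs_with_special_tokens
    # yields [CLS] 5 6 [SEP] 7 [SEP], and create_token_type_ids_from_sequences marks
    # the first four positions with 0 and the last two positions with 1.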
| 254
| 0
|
"""simple docstring"""
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
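# Behaviour sketch: because `self.__dict__` is bound to the wrapped config's own
# __dict__, attribute reads and writes pass straight through to it;
# `modal_hidden_size` (and `num_labels`, when given) are layered into that shared
# namespace rather than stored separately.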
| 663
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
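# Behaviour sketch of the lazy-module pattern above: at import time only
# `_import_structure` is built; an access such as
# `from transformers.models.altclip import AltCLIPModel` resolves through _LazyModule
# and only then imports modeling_altclip (assuming torch is installed).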
| 663
| 1
|
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type , pytorch_checkpoint_path , config_file , tf_dump_path , compare_with_pt_model=False , use_cached_models=True ):
    '''simple docstring'''
    if model_type not in MODEL_CLASSES:
        raise ValueError(F"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." )

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file , CONFIG_NAME , force_download=not use_cached_models )
    config = config_class.from_json_file(config_file )
    config.output_hidden_states = True
    config.output_attentions = True
    print(F"Building TensorFlow model from configuration: {config}" )
    tf_model = model_class(config )

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path , WEIGHTS_NAME , force_download=not use_cached_models )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model , pytorch_checkpoint_path )

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs , training=False )  # build the network

        state_dict = torch.load(pytorch_checkpoint_path , map_location="cpu" )
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None , config=config , state_dict=state_dict )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs )

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf ) )
        print(F"Max absolute difference between models outputs {diff}" )
        assert diff <= 2e-2, F"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(F"Save TensorFlow model to {tf_dump_path}" )
    tf_model.save_weights(tf_dump_path , save_format="h5" )
def convert_all_pt_checkpoints_to_tf(
    args_model_type , tf_dump_path , model_shortcut_names_or_path=None , config_shortcut_names_or_path=None , compare_with_pt_model=False , use_cached_models=False , remove_cached_files=False , only_convert_finetuned_models=False , ):
    '''simple docstring'''
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys() )
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types , start=1 ):
        print("=" * 100 )
        print(F" Converting model type {j}/{len(model_types )}: {model_type}" )
        print("=" * 100 )
        if model_type not in MODEL_CLASSES:
            raise ValueError(F"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." )

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys() )
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path , config_shortcut_names_or_path ) , start=1 ):
            print("-" * 100 )
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(F" Skipping finetuned checkpoint {model_shortcut_name}" )
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(F" Skipping not finetuned checkpoint {model_shortcut_name}" )
                continue
            print(
                F" Converting checkpoint {i}/{len(model_shortcut_names_or_path )}: {model_shortcut_name} - model_type {model_type}" )
            print("-" * 100 )

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name , CONFIG_NAME , force_download=not use_cached_models )
            else:
                config_file = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name , WEIGHTS_NAME , force_download=not use_cached_models )
            else:
                model_file = model_shortcut_name
            if os.path.isfile(model_shortcut_name ):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type , pytorch_checkpoint_path=model_file , config_file=config_file , tf_dump_path=os.path.join(tf_dump_path , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=compare_with_pt_model , )
            if remove_cached_files:
                os.remove(config_file )
                os.remove(model_file )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
F"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
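# Example invocation (file names are illustrative; the flags come from the argparse
# definition above):
# python convert_pytorch_checkpoint_to_tf2.py --model_type bert \
#     --pytorch_checkpoint_path ./bert.bin --config_file ./bert_config.json \
#     --tf_dump_path ./tf_dump --compare_with_pt_model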
| 65
|
import os
def solution() -> int:
    with open(os.path.dirname(__file__ ) + "/grid.txt" ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 , 20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
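# Mini worked example of one pass (1-row toy grid, window of 4): for the row
# [1, 2, 3, 4, 5] the "right" scan evaluates 1*2*3*4 = 24 and 2*3*4*5 = 120,
# so the running maximum ends at 120; the full solution repeats this over the
# 20x20 grid in four directions.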
| 629
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp( self ):
        self.image_processor_tester = DetaImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "do_rescale" ) )
        self.assertTrue(hasattr(image_processing , "do_pad" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )

    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
        self.assertEqual(image_processor.do_pad , True )

    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image , annotations=target , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic" )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
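    # Note on the two @slow tests above: they exercise the COCO detection and panoptic
    # annotation formats respectively, and both rely on the fixture files under
    # ./tests/fixtures/tests_samples/COCO/ referenced in the code.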
| 481
| 0
|
"""simple docstring"""
from __future__ import annotations
def ceil_index(v , l , r , key ):  # noqa: E741
    '''simple docstring'''
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int] ) -> int:
    '''simple docstring'''
    if len(v ) == 0:
        return 0
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v ) ):
        if v[i] < tail[0]:
            # new smallest candidate endpoint for length 1
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] among the stored endpoints
            tail[ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
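# Hand-checked example: for v = [2, 5, 3, 7, 11, 8, 10, 13, 6] one longest strictly
# increasing subsequence is [2, 3, 7, 8, 10, 13], so
# longest_increasing_subsequence_length returns 6; `tail` holds, for each length,
# the smallest possible endpoint value seen so far.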
| 608
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor ):
    model_input_names = ["input_features"]

    def __init__( self , feature_size=80 , sampling_rate=16000 , hop_length=160 , chunk_length=30 , n_fft=400 , padding_value=0.0 , return_attention_mask=False , **kwargs , ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=sampling_rate , norm='slaney' , mel_scale='slaney' , )

    def _np_extract_fbank_features( self , waveform ) -> np.ndarray:
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0 )
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values , attention_mask , padding_value = 0.0 ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values

    def __call__( self , raw_speech , truncation = True , pad_to_multiple_of = None , return_tensors = None , return_attention_mask = None , padding = "max_length" , max_length = None , sampling_rate = None , do_normalize = None , **kwargs , ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )

        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]

        batched_speech = BatchFeature({'input_features': raw_speech} )

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs['input_features'] = self.zero_mean_unit_var_norm(
                padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
            padded_inputs['input_features'] = np.stack(padded_inputs['input_features'] , axis=0 )

        # make sure list is in array format
        input_features = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )

        input_features = [self._np_extract_fbank_features(waveform ) for waveform in input_features[0]]

        if isinstance(input_features[0] , List ):
            padded_inputs['input_features'] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        else:
            padded_inputs['input_features'] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs['attention_mask'] = padded_inputs['attention_mask'][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )

        return padded_inputs

    def to_dict( self ) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__ )
        output['feature_extractor_type'] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
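# Usage sketch (synthetic silence; real inputs are 16 kHz mono float arrays):
# fe = WhisperFeatureExtractor()
# feats = fe(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="np")
# feats["input_features"].shape  # -> (1, 80, 3000): 80 mel bins over 30 s of frames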
| 599
| 0
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT] ):
    '''simple docstring'''

    def __init__( self , key = "root" , value = None) -> None:
        """simple docstring"""
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__( self) -> str:
        """simple docstring"""
        return F'''Node({self.key}: {self.value})'''

    @property
    def level( self) -> int:
        """simple docstring"""
        return len(self.forward)
class SkipList(Generic[KT, VT] ):
    '''simple docstring'''

    def __init__( self , p = 0.5 , max_level = 16) -> None:
        """simple docstring"""
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__( self) -> str:
        """simple docstring"""
        items = list(self)
        if len(items) == 0:
            return F'''SkipList(level={self.level})'''
        label_size = max((len(str(item)) for item in items) , default=4)
        label_size = max(label_size , 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(F'''[{node.key}]'''.ljust(label_size , "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                F'''[{node.key}]'''.ljust(label_size , "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards))
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return F'''SkipList(level={self.level})\n''' + "\n".join(lines)
    def __iter__( self):
        """simple docstring"""
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level( self) -> int:
        """simple docstring"""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node( self , key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """simple docstring"""
        update_vector = []
        node = self.head
for i in reversed(range(self.level)):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
            update_vector.append(node)
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
    def delete( self , key) -> None:
        """simple docstring"""
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert( self , key , value) -> None:
        """simple docstring"""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key , value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node
    def find( self , key) -> VT | None:
        """simple docstring"""
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert() -> None:
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert("Key1" , 3 )
    skip_list.insert("Key2" , 12 )
    skip_list.insert("Key3" , 41 )
    skip_list.insert("Key4" , -19 )

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values ) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value() -> None:
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert("Key1" , 10 )
    skip_list.insert("Key1" , 12 )

    skip_list.insert("Key5" , 7 )
    skip_list.insert("Key7" , 10 )
    skip_list.insert("Key10" , 5 )

    skip_list.insert("Key7" , 7 )
    skip_list.insert("Key5" , 5 )
    skip_list.insert("Key10" , 10 )

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values ) != 4:
        print()

    assert len(all_values ) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none() -> None:
    '''simple docstring'''
    skip_list = SkipList()
    assert skip_list.find("Some key" ) is None
def test_search() -> None:
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert("Key2" , 20 )
    assert skip_list.find("Key2" ) == 20

    skip_list.insert("Some Key" , 10 )
    skip_list.insert("Key2" , 8 )
    skip_list.insert("V" , 13 )

    assert skip_list.find("Y" ) is None
    assert skip_list.find("Key2" ) == 8
    assert skip_list.find("Some Key" ) == 10
    assert skip_list.find("V" ) == 13
def test_deleting_item_from_empty_list_do_nothing() -> None:
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.delete("Some key" )
    assert len(skip_list.head.forward ) == 0
def test_deleted_items_are_not_founded_by_find_method() -> None:
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 14 )
    skip_list.insert("Key2" , 15 )

    skip_list.delete("V" )
    skip_list.delete("Key2" )

    assert skip_list.find("V" ) is None
    assert skip_list.find("Key2" ) is None
def test_delete_removes_only_given_key() -> None:
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 14 )
    skip_list.insert("Key2" , 15 )

    skip_list.delete("V" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) == 14
    assert skip_list.find("Key1" ) == 12
    assert skip_list.find("Key2" ) == 15

    skip_list.delete("X" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) == 12
    assert skip_list.find("Key2" ) == 15

    skip_list.delete("Key1" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) is None
    assert skip_list.find("Key2" ) == 15

    skip_list.delete("Key2" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) is None
    assert skip_list.find("Key2" ) is None
def test_delete_doesnt_leave_dead_nodes() -> None:
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 142 )
    skip_list.insert("Key2" , 15 )

    skip_list.delete("X" )

    def traverse_keys(node ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )

    assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def test_iter_always_yields_sorted_values() -> None:
    '''simple docstring'''
    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )

    skip_list = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def pytests() -> None:
    '''simple docstring'''
    for _ in range(100 ):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()
def main() -> None:
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert(2 , "2" )
    skip_list.insert(4 , "4" )
    skip_list.insert(6 , "4" )
    skip_list.insert(4 , "5" )
    skip_list.insert(8 , "4" )
    skip_list.insert(9 , "4" )

    skip_list.delete(4 )

    print(skip_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
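# Complexity sketch: with p = 0.5 each node reaches level k with probability
# 2**(-k), so the expected height is O(log n) and search/insert/delete all run in
# expected O(log n); the exact shape varies run to run, which is why pytests above
# repeats every test 100 times.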
| 582
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args ):
    '''simple docstring'''
    return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}


def main():
    '''simple docstring'''
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers" )
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )

    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
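# Example invocation (subcommand names come from the registrations above; the
# dataset path is illustrative):
# datasets-cli env          # prints environment info via EnvironmentCommand
# datasets-cli test ./my_dataset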
| 582
| 1
|
"""simple docstring"""
def kinetic_energy(mass: float , velocity: float ) -> float:
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 4
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
README_FILE = "README.md"
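# Added sketch of how the 'init' pattern above works: the same regex both
# extracts the current version and anchors the rewrite.
#     sample = '__version__ = "4.30.0.dev0"\n'
#     re_pattern, template = REPLACE_PATTERNS['init']
#     re_pattern.search(sample).groups()[0]                      # -> '4.30.0.dev0'
#     re_pattern.sub(template.replace('VERSION', '4.30.0'), sample)
#     # -> '__version__ = "4.30.0"\n'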
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in `fname` using the regex registered for `pattern`."""
    with open(fname, 'r', encoding='utf-8', newline='\n') as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION', version)
    code = re_pattern.sub(replace, code)
    with open(fname, 'w', encoding='utf-8', newline='\n') as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects')
        if "legacy" in directories:
            directories.remove('legacy')
        for fname in fnames:
            if fname.endswith('.py'):
                update_version_in_file(os.path.join(folder, fname), version, pattern='examples')
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Point the model links in the README at the stable docs rather than `main`."""
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith('1.'):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/transformers/main/model_doc', 'https://huggingface.co/docs/transformers/model_doc', )
        index += 1
    with open(README_FILE, 'w', encoding='utf-8', newline='\n') as f:
        f.writelines(lines)
def get_version():
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES['init'], 'r') as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = F'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(F'Which version are you releasing? [{default_version}]' )
    if len(version) == 0:
        version = default_version
    print(F'Updating version to {version}.' )
    global_version_update(version, patch=patch)
    if not patch:
        print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'Which version are we developing now? [{dev_version}]' )
    if len(version) == 0:
        version = dev_version
    print(F'Updating version to {version}.' )
    global_version_update(version)
    print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__UpperCamelCase : Optional[int] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 4
| 1
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def __snake_case ( self : Optional[Any] ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __snake_case ( self : Tuple ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __snake_case ( self : int ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
def __snake_case ( self : Dict ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
lowerCAmelCase__ = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ , '''do_lower_case''' ) else False
lowerCAmelCase__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __snake_case ( self : Optional[Any] ):
lowerCAmelCase__ = ['''的''', '''人''', '''有''']
lowerCAmelCase__ = ''''''.join(SCREAMING_SNAKE_CASE_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCAmelCase__ = True
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = False
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase__ = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_ )
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
def __snake_case ( self : Union[str, Any] ):
lowerCAmelCase__ = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowerCAmelCase__ = '''你好,你是谁'''
lowerCAmelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer.prepare_for_model(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 288
|
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=50,
        initializer_range=0.02, use_labels=True, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs,
    ):
        config.add_cross_attention = True
        config.is_decoder = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )['hidden_states'][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = 'bert'
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)
    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 288
| 1
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)
    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match", )
| 300
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
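# Added demonstration (sketch): 0 marks an open cell, 1 marks a wall.
if __name__ == "__main__":
    demo_maze = [
        [0, 1, 0],
        [0, 1, 0],
        [0, 0, 0],
    ]
    assert solve_maze(demo_maze)  # prints the 0/1 path matrix for this 3x3 maze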
| 300
| 1
|
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")
class QueueByTwoStacks(Generic[_T]):
    """FIFO queue built from two LIFO stacks."""
    def __init__(self, iterable: "Iterable[_T] | None" = None) -> None:
        self._stack1: list = list(iterable or [])
        self._stack2: list = []
    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)
    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"
    def put(self, item: _T) -> None:
        self._stack1.append(item)
    def get(self) -> _T:
        # Move items to the output stack only when it runs empty, so each item
        # is moved at most once: amortized O(1) per operation.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
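def _demo_queue() -> None:
    # Added usage sketch: the two-stack construction preserves FIFO order.
    queue = QueueByTwoStacks([1, 2])
    queue.put(3)
    assert [queue.get(), queue.get(), queue.get()] == [1, 2, 3]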
if __name__ == "__main__":
from doctest import testmod
testmod()
| 707
|
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
logger = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 447
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the factorial-style term used by
    Newton's forward-difference interpolation."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input('enter the numbers of values: '))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print('enter the values of parameters in a list: ')
    x = list(map(int, input().split()))
    print('enter the values of corresponding parameters: ')
    for i in range(n):
        y[i][0] = float(input())
    value = int(input('enter the value to interpolate: '))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f'''the value at {value} is {summ}''')
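def _demo_forward_interpolation() -> float:
    # Non-interactive sketch (added, hypothetical data): interpolate f(2.5)
    # from samples of f(x) = x**2 at x = 0, 1, 2, 3 using the same scheme.
    x = [0, 1, 2, 3]
    n = len(x)
    y = [[float(xi**2)] + [0.0] * (n - 1) for xi in x]
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    u = (2.5 - x[0]) / (x[1] - x[0])
    summ = y[0][0]
    for i in range(1, n):
        summ += ucal(u, i) * y[0][i] / math.factorial(i)
    return summ  # 6.25: exact for any polynomial of degree <= 3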
if __name__ == "__main__":
main()
| 42
|
'''simple docstring'''
def topological_sort(graph):
    """Kahn's algorithm: repeatedly remove zero-indegree vertices; if not every
    vertex gets processed, the graph contains a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print('Cycle exists')
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
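# Added sketch: a cyclic graph keeps every indegree nonzero, so fewer than
# len(graph) vertices get processed and the cycle is reported.
topological_sort({0: [1], 1: [2], 2: [0]})  # prints "Cycle exists"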
| 356
| 0
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
    "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in `fname` using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Point the model links in the README at the stable docs rather than `main`."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc", "https://huggingface.co/docs/transformers/model_doc", )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = F"""{default_version.major}.{default_version.minor + 1}.0"""
    # Now let's ask nicely if that's the right one.
    version = input(F"""Which version are you releasing? [{default_version}]""")
    if len(version) == 0:
        version = default_version
    print(F"""Updating version to {version}.""")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F"""Which version are we developing now? [{dev_version}]""")
    if len(version) == 0:
        version = dev_version
    print(F"""Updating version to {version}.""")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_lowerCamelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 447
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """Print the first-order entropy, the second-order entropy, and their difference."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(F"""{round(-1 * my_fir_sum):.1f}""")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(F"""{round(-1 * my_sec_sum):.1f}""")
    # print the difference between them
    print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}""")
def analyze_text(text: str):
    """Count single characters and overlapping two-character sequences."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
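# Quick sanity check (added sketch): every character is counted exactly once,
# including the final one handled before the main loop:
#     analyze_text("aaaa")[0]  # -> Counter({'a': 4})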
def main():
    import doctest
    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 447
| 1
|
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError('days_between_payments must be > 0')
    if daily_interest_rate < 0:
        raise ValueError('daily_interest_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * daily_interest_rate * days_between_payments
def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError('number_of_compounding_periods must be > 0')
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('nominal_annual_interest_rate_percentage must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float) -> float:
    if number_of_years <= 0:
        raise ValueError('number_of_years must be > 0')
    if nominal_annual_percentage_rate < 0:
        raise ValueError('nominal_annual_percentage_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
if __name__ == "__main__":
import doctest
doctest.testmod()
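# Worked example (added sketch): $1000 at a 0.05% daily rate over 30 days earns
# 1000 * 0.0005 * 30 = $15 of simple interest, while compounding the same
# nominal rate per period grows strictly faster:
#     simple_interest(1000, 0.0005, 30)    # -> 15.0
#     compound_interest(1000, 0.0005, 30)  # -> ~15.11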
| 73
|
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCAmelCase__ : str = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """Automatic mask generation: chunks a grid of prompt points over the image
    and aggregates the predicted masks."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, 'vision')
        requires_backends(self, 'torch')
        if self.framework != "pt":
            raise ValueError(F"""The {self.__class__} is only available in PyTorch.""")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs['points_per_batch'] = kwargs['points_per_batch']
        if "points_per_crop" in kwargs:
            preprocess_kwargs['points_per_crop'] = kwargs['points_per_crop']
        if "crops_n_layers" in kwargs:
            preprocess_kwargs['crops_n_layers'] = kwargs['crops_n_layers']
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs['crop_overlap_ratio'] = kwargs['crop_overlap_ratio']
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs['crop_n_points_downscale_factor'] = kwargs['crop_n_points_downscale_factor']
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params['pred_iou_thresh'] = kwargs['pred_iou_thresh']
        if "stability_score_offset" in kwargs:
            forward_params['stability_score_offset'] = kwargs['stability_score_offset']
        if "mask_threshold" in kwargs:
            forward_params['mask_threshold'] = kwargs['mask_threshold']
        if "stability_score_thresh" in kwargs:
            forward_params['stability_score_thresh'] = kwargs['stability_score_thresh']
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs['crops_nms_thresh'] = kwargs['crops_nms_thresh']
        if "output_rle_mask" in kwargs:
            postprocess_kwargs['output_rle_mask'] = kwargs['output_rle_mask']
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs['output_bboxes_mask'] = kwargs['output_bboxes_mask']
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self, image, points_per_batch=64, crops_n_layers: int = 0, crop_overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[int] = 1, ):
        image = load_image(image)
        target_size = self.image_processor.size['longest_edge']
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors='pt')
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop('pixel_values'))
                    model_inputs['image_embeddings'] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '
                'To return all points at once, set points_per_batch to None')
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ):
        input_boxes = model_inputs.pop('input_boxes')
        is_last = model_inputs.pop('is_last')
        original_sizes = model_inputs.pop('original_sizes').tolist()
        reshaped_input_sizes = model_inputs.pop('reshaped_input_sizes').tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs['pred_masks']
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs['iou_scores']
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7, ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('iou_scores'))
            all_masks.extend(model_output.pop('masks'))
            all_boxes.append(model_output.pop('boxes'))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional['rle_mask'] = rle_mask
        if output_bboxes_mask:
            optional['bounding_boxes'] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 347
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 245
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
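# To make the pattern above concrete, here is a minimal self-contained sketch of the
# lazy-module idea (an illustration only, not the actual `_LazyModule` implementation):
# attribute access triggers the real submodule import and caches the result.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value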
| 245
| 1
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 143
|
def solution(n: int = 4_000_000) -> int:
    """Sum the even Fibonacci numbers that do not exceed `n` (Project Euler problem 2)."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 548
| 0
|
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
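# A hypothetical test showing how the decorators above compose; the class and
# method names are illustrative only, not part of this module's API.
class ExampleDecoratedTest(unittest.TestCase):
    @slow
    @require_cuda
    def test_runs_on_gpu(self):
        # Executed only when RUN_SLOW=yes and a CUDA device is visible.
        self.assertTrue(torch.cuda.is_available())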
class TempDirTestCase(unittest.TestCase):
    """Creates a class-wide temporary directory, emptied between tests when `clear_on_setup` is True."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
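# A small usage sketch for the two helpers above; the commands are placeholders,
# not examples taken from this file.
if __name__ == "__main__":
    out = run_command([sys.executable, "-c", "print('hello')"], return_stdout=True)
    print(out)  # "hello\n"
    result = execute_subprocess_async([sys.executable, "-c", "print('world')"])
    print(result.returncode, result.stdout)  # 0 ['world']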
| 631
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
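# A usage sketch (assumed, for illustration): override a couple of spectrogram-related
# defaults and read them back.
if __name__ == "__main__":
    config = ASTConfig(max_length=512, time_stride=8)
    print(config.num_mel_bins, config.patch_size, config.time_stride)  # 128 16 8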
| 631
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
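# A usage sketch (assumed, for illustration): request intermediate feature maps by
# stage name, as a downstream backbone consumer would.
if __name__ == "__main__":
    config = ConvNextV2Config(depths=[2, 2, 6, 2], out_features=["stage2", "stage4"])
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']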
| 337
|
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIP tokenizer that can map one placeholder token to several learned vector tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}. Keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
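# A usage sketch; the checkpoint name and placeholder token are assumptions for
# illustration, not taken from this file.
if __name__ == "__main__":
    tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
    # "<cat-toy>" expands to "<cat-toy>_0 ... <cat-toy>_3" before encoding.
    ids = tokenizer.encode("a photo of <cat-toy>", vector_shuffle=True)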
| 422
| 0
|
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
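    # Worked example: from sigma = n * q * mu, solving for mobility gives
    # mu = sigma / (n * q).
    name, value = carrier_concentration(conductivity=25, electron_conc=100, mobility=0)
    print(name, value)  # mobility 1.5604...e+18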
| 716
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 332
| 0
|