code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class lowerCamelCase_ ( unittest.TestCase ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=3_0 , __lowerCAmelCase=4_0_0 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=True , __lowerCAmelCase=1 / 2_5_5 , __lowerCAmelCase=True , ): """simple docstring""" # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __magic_name__ :str = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} __magic_name__ :Union[str, Any] = parent __magic_name__ :Optional[Any] = batch_size __magic_name__ :Tuple = num_channels __magic_name__ :int = min_resolution __magic_name__ :Union[str, Any] = max_resolution __magic_name__ :Any = do_resize __magic_name__ :Any = size __magic_name__ :int = do_normalize __magic_name__ :Dict = image_mean __magic_name__ :Optional[int] = image_std __magic_name__ :int = do_rescale __magic_name__ :List[str] = rescale_factor __magic_name__ :Union[str, Any] = do_pad def A ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def A ( self , __lowerCAmelCase , __lowerCAmelCase=False ): """simple docstring""" if not batched: __magic_name__ :Optional[Any] = image_inputs[0] if isinstance(__lowerCAmelCase , Image.Image ): __magic_name__ , __magic_name__ :Tuple = image.size else: __magic_name__ , __magic_name__ :Optional[int] = image.shape[1], image.shape[2] if w < h: __magic_name__ :Tuple = int(self.size['''shortest_edge'''] * h / w ) __magic_name__ :Optional[Any] = self.size['''shortest_edge'''] elif w > h: __magic_name__ :Any = self.size['''shortest_edge'''] __magic_name__ :Optional[Any] = int(self.size['''shortest_edge'''] * w / h ) else: __magic_name__ :Tuple = self.size['''shortest_edge'''] __magic_name__ :List[str] = self.size['''shortest_edge'''] else: __magic_name__ :Optional[int] = [] for image in image_inputs: __magic_name__ , __magic_name__ :Tuple = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __magic_name__ :List[str] = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0] __magic_name__ :str = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ): a__ = ConditionalDetrImageProcessor if is_vision_available() else None def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = ConditionalDetrImageProcessingTester(self ) @property def A ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def A ( self ): """simple docstring""" __magic_name__ :Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) def A ( self ): """simple docstring""" __magic_name__ :Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) __magic_name__ :Tuple = self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__lowerCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) def A ( self ): """simple docstring""" pass def A ( self ): """simple docstring""" # Initialize image_processing __magic_name__ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __magic_name__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input __magic_name__ :int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __magic_name__ , __magic_name__ :Optional[Any] = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __magic_name__ , __magic_name__ :int = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) __magic_name__ :int = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def A ( self ): """simple docstring""" # Initialize image_processing __magic_name__ :Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __magic_name__ :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input __magic_name__ :Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __magic_name__ , __magic_name__ :Optional[Any] = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __magic_name__ :Any = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values __magic_name__ , __magic_name__ :int = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def A ( self ): """simple docstring""" # Initialize image_processing __magic_name__ :List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __magic_name__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input __magic_name__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __magic_name__ , __magic_name__ :Union[str, Any] = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __magic_name__ :Any = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values __magic_name__ , __magic_name__ :Any = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def A ( self ): """simple docstring""" # prepare image and target __magic_name__ :Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: __magic_name__ :str = json.loads(f.read() ) __magic_name__ :List[Any] = {'''image_id''': 3_9_7_6_9, '''annotations''': target} # encode them __magic_name__ :Optional[int] = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' ) __magic_name__ :Any = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values __magic_name__ :Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) __magic_name__ :Optional[Any] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area __magic_name__ :Any = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes __magic_name__ :Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) __magic_name__ :Any = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id __magic_name__ :Optional[Any] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd __magic_name__ :Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels __magic_name__ :List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify orig_size __magic_name__ :List[str] = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size __magic_name__ :Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) ) @slow def A ( self ): """simple docstring""" # prepare image, target and masks_path __magic_name__ :Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: __magic_name__ :str = json.loads(f.read() ) __magic_name__ :int = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target} __magic_name__ :int = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them __magic_name__ :str = ConditionalDetrImageProcessor(format='''coco_panoptic''' ) __magic_name__ :int = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values __magic_name__ :Optional[int] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) __magic_name__ :Optional[Any] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area __magic_name__ :List[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes __magic_name__ :Union[str, Any] = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) __magic_name__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id __magic_name__ :Tuple = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd __magic_name__ :Dict = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels __magic_name__ :Optional[int] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify masks __magic_name__ :str = 8_2_2_8_7_3 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase ) # verify orig_size __magic_name__ :Dict = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size __magic_name__ :Tuple = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
0
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple[int, int]: """simple docstring""" def constraint_to_multiple_of(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0 , _lowerCAmelCase=None ): A : Optional[int] = round(val / multiple ) * multiple if max_val is not None and x > max_val: A : Optional[Any] = math.floor(val / multiple ) * multiple if x < min_val: A : Any = math.ceil(val / multiple ) * multiple return x A : Optional[Any] = (output_size, output_size) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else output_size A , A : List[Any] = get_image_size(_lowerCAmelCase ) A , A : List[Any] = output_size # determine new height and width A : Optional[int] = output_height / input_height A : Optional[Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width A : Any = scale_width else: # fit height A : int = scale_height A : Any = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCAmelCase ) A : int = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCAmelCase ) return (new_height, new_width) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : Optional[int] = ["pixel_values"] def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) A : int = size if size is not None else {"""height""": 384, """width""": 384} A : str = get_size_dict(lowerCamelCase__ ) A : Optional[Any] = do_resize A : Optional[int] = size A : Union[str, Any] = keep_aspect_ratio A : int = ensure_multiple_of A : Dict = resample A : Optional[Any] = do_rescale A : Any = rescale_factor A : str = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ): A : Dict = get_size_dict(lowerCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) A : Optional[Any] = get_resize_output_image_size( lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, ) return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ): A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize A : str = size if size is not None else self.size A : str = get_size_dict(lowerCamelCase__ ) A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of A : Tuple = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : int = rescale_factor if rescale_factor is not None else self.rescale_factor A : int = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : Optional[int] = image_std if image_std is not None else self.image_std A : Any = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. A : str = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images] if do_rescale: A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images] if do_normalize: A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images] A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images] A : Optional[int] = {"""pixel_values""": images} return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ): A : Any = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCamelCase__ ) != len(lowerCamelCase__ ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(lowerCamelCase__ ): A : int = target_sizes.numpy() A : Union[str, Any] = [] for idx in range(len(lowerCamelCase__ ) ): A : int = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ ) A : Tuple = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCamelCase__ ) else: A : List[str] = logits.argmax(dim=1 ) A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
662
0
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __lowerCamelCase (_a , unittest.TestCase ): _lowercase = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def snake_case_ ( self: int,A_: Tuple=0 ): '''simple docstring''' __UpperCamelCase = floats_tensor((1, 3, 128, 128),rng=random.Random(A_ ) ) __UpperCamelCase = np.random.RandomState(A_ ) __UpperCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.7_5, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**A_ ).images __UpperCamelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __UpperCamelCase = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def snake_case_ ( self: Tuple ): '''simple docstring''' __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint,provider='CPUExecutionProvider' ) __UpperCamelCase = PNDMScheduler.from_config(pipe.scheduler.config,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**A_ ).images __UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCamelCase = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def snake_case_ ( self: List[str] ): '''simple docstring''' __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint,provider='CPUExecutionProvider' ) __UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations __UpperCamelCase = pipe(**self.get_dummy_inputs() ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**A_ ).images __UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCamelCase = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def snake_case_ ( self: Optional[Any] ): '''simple docstring''' __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint,provider='CPUExecutionProvider' ) __UpperCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**A_ ).images __UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCamelCase = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def snake_case_ ( self: List[Any] ): '''simple docstring''' __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint,provider='CPUExecutionProvider' ) __UpperCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**A_ ).images __UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCamelCase = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint,provider='CPUExecutionProvider' ) __UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = self.get_dummy_inputs() __UpperCamelCase = pipe(**A_ ).images __UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCamelCase = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class __lowerCamelCase (unittest.TestCase ): @property def snake_case_ ( self: Any ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = ort.SessionOptions() __UpperCamelCase = False return options def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) __UpperCamelCase = init_image.resize((768, 512) ) # using the PNDM scheduler by default __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4',revision='onnx',safety_checker=A_,feature_extractor=A_,provider=self.gpu_provider,sess_options=self.gpu_options,) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = 'A fantasy landscape, trending on artstation' __UpperCamelCase = np.random.RandomState(0 ) __UpperCamelCase = pipe( prompt=A_,image=A_,strength=0.7_5,guidance_scale=7.5,num_inference_steps=10,generator=A_,output_type='np',) __UpperCamelCase = output.images __UpperCamelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCamelCase = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def snake_case_ ( self: List[str] ): '''simple docstring''' __UpperCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) __UpperCamelCase = init_image.resize((768, 512) ) __UpperCamelCase = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5',subfolder='scheduler',revision='onnx' ) __UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5',revision='onnx',scheduler=A_,safety_checker=A_,feature_extractor=A_,provider=self.gpu_provider,sess_options=self.gpu_options,) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = 'A fantasy landscape, trending on artstation' __UpperCamelCase = np.random.RandomState(0 ) __UpperCamelCase = pipe( prompt=A_,image=A_,strength=0.7_5,guidance_scale=7.5,num_inference_steps=20,generator=A_,output_type='np',) __UpperCamelCase = output.images __UpperCamelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCamelCase = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
1
class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self, lowerCamelCase__ ): # we need a list not a string, so do something to change the type A : List[Any] = arr.split(""",""" ) def _lowerCAmelCase ( self ): A : int = [int(self.array[0] )] * len(self.array ) A : Optional[Any] = [int(self.array[0] )] * len(self.array ) for i in range(1, len(self.array ) ): A : Union[str, Any] = max( int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) ) A : Dict = max(sum_value[i], rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": SCREAMING_SNAKE_CASE_:int = input("""please input some numbers:""") SCREAMING_SNAKE_CASE_:Dict = SubArray(whole_array) SCREAMING_SNAKE_CASE_:Optional[int] = array.solve_sub_array() print(("""the results is:""", re))
662
0
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def SCREAMING_SNAKE_CASE_ ( ) -> Any: _A = argparse.ArgumentParser() parser.add_argument( '''-m''' , '''--pretrained_model_name_or_path''' , type=_snake_case , default=_snake_case , required=_snake_case , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , ) parser.add_argument( '''-c''' , '''--caption''' , type=_snake_case , default='''robotic cat with wings''' , help='''Text used to generate images.''' , ) parser.add_argument( '''-n''' , '''--images_num''' , type=_snake_case , default=4 , help='''How much images to generate.''' , ) parser.add_argument( '''-s''' , '''--seed''' , type=_snake_case , default=42 , help='''Seed for random process.''' , ) parser.add_argument( '''-ci''' , '''--cuda_id''' , type=_snake_case , default=0 , help='''cuda_id.''' , ) _A = parser.parse_args() return args def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Dict , _snake_case :Any ) -> List[Any]: if not len(_snake_case ) == rows * cols: raise ValueError('''The specified number of rows and columns are not correct.''' ) _A , _A = imgs[0].size _A = Image.new('''RGB''' , size=(cols * w, rows * h) ) _A , _A = grid.size for i, img in enumerate(_snake_case ): grid.paste(_snake_case , box=(i % cols * w, i // cols * h) ) return grid def SCREAMING_SNAKE_CASE_ ( _snake_case :List[Any] , _snake_case :Union[str, Any]="robotic cat with wings" , _snake_case :List[str]=7.5 , _snake_case :Optional[int]=50 , _snake_case :List[str]=1 , _snake_case :List[str]=42 , ) -> List[str]: _A = torch.Generator(pipeline.device ).manual_seed(_snake_case ) _A = pipeline( _snake_case , guidance_scale=_snake_case , num_inference_steps=_snake_case , generator=_snake_case , num_images_per_prompt=_snake_case , ).images _A = int(math.sqrt(_snake_case ) ) _A = image_grid(_snake_case , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images UpperCAmelCase_ = parse_args() # Load models and create wrapper for stable diffusion UpperCAmelCase_ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""") UpperCAmelCase_ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""") UpperCAmelCase_ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""") UpperCAmelCase_ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""") UpperCAmelCase_ = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) UpperCAmelCase_ = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")): UpperCAmelCase_ = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, """unet""", unet) else: UpperCAmelCase_ = unet.to(torch.device("""cuda""", args.cuda_id)) UpperCAmelCase_ = pipeline.to(unet.device) UpperCAmelCase_ ,UpperCAmelCase_ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split())))) UpperCAmelCase_ = os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
2
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_:List[Any] = { """google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : Optional[Any] = "bit" __lowerCamelCase : Union[str, Any] = ["preactivation", "bottleneck"] __lowerCamelCase : Union[str, Any] = ["SAME", "VALID"] def __init__( self, lowerCamelCase__=3, lowerCamelCase__=64, lowerCamelCase__=[256, 512, 1024, 2048], lowerCamelCase__=[3, 4, 6, 3], lowerCamelCase__="preactivation", lowerCamelCase__="relu", lowerCamelCase__=None, lowerCamelCase__=32, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=1, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) if layer_type not in self.layer_types: raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' ) if global_padding is not None: if global_padding.upper() in self.supported_padding: A : List[Any] = global_padding.upper() else: raise ValueError(f'''Padding strategy {global_padding} not supported''' ) A : Dict = num_channels A : List[Any] = embedding_size A : Optional[Any] = hidden_sizes A : str = depths A : str = layer_type A : Union[str, Any] = hidden_act A : Any = global_padding A : Optional[int] = num_groups A : Dict = drop_path_rate A : List[Any] = embedding_dynamic_padding A : List[Any] = output_stride A : Union[str, Any] = width_factor A : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )] A , A : Any = get_aligned_output_features_output_indices( out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
662
0
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Any = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k', 'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v', 'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q', 'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u', 'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v', 'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out', 'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos', 'self_attn.rotary_emb': 'encoder.embed_positions', 'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm', 'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1', 'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2', 'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv', 'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm', 'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm', 'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense', 'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense', 'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm', 'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense', 'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense', 'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } lowerCAmelCase : Union[str, Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def A_( A : Union[str, Any] , A : List[str] , A : Dict , A : Tuple , A : Any): for attribute in key.split('.'): UpperCamelCase = getattr(A , A) if weight_type is not None: UpperCamelCase = getattr(A , A).shape else: UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''') if weight_type == "weight": UpperCamelCase = value elif weight_type == "weight_g": UpperCamelCase = value elif weight_type == "weight_v": UpperCamelCase = value elif weight_type == "bias": UpperCamelCase = value elif weight_type == "running_mean": UpperCamelCase = value elif weight_type == "running_var": UpperCamelCase = value elif weight_type == "num_batches_tracked": UpperCamelCase = value elif weight_type == "inv_freq": UpperCamelCase = value else: UpperCamelCase = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''') def A_( A : int , A : Union[str, Any] , A : str): UpperCamelCase = [] UpperCamelCase = fairseq_model.state_dict() UpperCamelCase = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase = False if "conv_layers" in name: load_conv_layer( A , A , A , A , hf_model.config.feat_extract_norm == 'group' , ) UpperCamelCase = True else: for key, mapped_key in MAPPING.items(): UpperCamelCase = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]: UpperCamelCase = True if "*" in mapped_key: UpperCamelCase = name.split(A)[0].split('.')[-2] UpperCamelCase = mapped_key.replace('*' , A) if "pos_bias_u" in name: UpperCamelCase = None elif "pos_bias_v" in name: UpperCamelCase = None elif "weight_g" in name: UpperCamelCase = 'weight_g' elif "weight_v" in name: UpperCamelCase = 'weight_v' elif "bias" in name: UpperCamelCase = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCamelCase = 'weight' elif "running_mean" in name: UpperCamelCase = 'running_mean' elif "inv_freq" in name: UpperCamelCase = 'inv_freq' elif "running_var" in name: UpperCamelCase = 'running_var' elif "num_batches_tracked" in name: UpperCamelCase = 'num_batches_tracked' else: UpperCamelCase = None set_recursively(A , A , A , A , A) continue if not is_used: unused_weights.append(A) logger.warning(f'''Unused weights: {unused_weights}''') def A_( A : str , A : int , A : int , A : List[str] , A : Tuple): UpperCamelCase = full_name.split('conv_layers.')[-1] UpperCamelCase = name.split('.') UpperCamelCase = int(items[0]) UpperCamelCase = int(items[1]) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''') UpperCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''') elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''') UpperCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''') elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''') UpperCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''') elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''') UpperCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''') else: unused_weights.append(A) @torch.no_grad() def A_( A : Dict , A : List[str] , A : Any=None , A : str=None , A : Optional[int]=True): if config_path is not None: UpperCamelCase = WavaVecaConformerConfig.from_pretrained(A , hidden_act='swish') else: UpperCamelCase = WavaVecaConformerConfig() if "rope" in checkpoint_path: UpperCamelCase = 'rotary' if is_finetuned: if dict_path: UpperCamelCase = Dictionary.load(A) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCamelCase = target_dict.pad_index UpperCamelCase = target_dict.bos_index UpperCamelCase = target_dict.eos_index UpperCamelCase = len(target_dict.symbols) UpperCamelCase = os.path.join(A , 'vocab.json') if not os.path.isdir(A): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(A)) return os.makedirs(A , exist_ok=A) UpperCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched UpperCamelCase = 0 UpperCamelCase = 1 with open(A , 'w' , encoding='utf-8') as vocab_handle: json.dump(A , A) UpperCamelCase = WavaVecaCTCTokenizer( A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=A , ) UpperCamelCase = True if config.feat_extract_norm == 'layer' else False UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=A , return_attention_mask=A , ) UpperCamelCase = WavaVecaProcessor(feature_extractor=A , tokenizer=A) processor.save_pretrained(A) UpperCamelCase = WavaVecaConformerForCTC(A) else: UpperCamelCase = WavaVecaConformerForPreTraining(A) if is_finetuned: UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])}) else: UpperCamelCase = argparse.Namespace(task='audio_pretraining') UpperCamelCase = fairseq.tasks.setup_task(A) UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A) UpperCamelCase = model[0].eval() recursively_load_weights(A , A , not is_finetuned) hf_wavavec.save_pretrained(A) if __name__ == "__main__": lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) lowerCAmelCase : Optional[int] = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
3
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=50, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=None, ): A : List[str] = parent A : List[str] = batch_size A : Optional[int] = seq_length A : Optional[int] = is_training A : Tuple = use_input_mask A : Optional[Any] = vocab_size A : str = hidden_size A : Any = num_hidden_layers A : List[Any] = num_attention_heads A : Optional[int] = intermediate_size A : int = hidden_act A : Dict = hidden_dropout_prob A : Optional[Any] = attention_probs_dropout_prob A : List[Any] = max_position_embeddings A : int = initializer_range A : Tuple = use_labels A : List[str] = scope def _lowerCAmelCase ( self ): A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) A : int = None if self.use_input_mask: A : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) A : List[Any] = self.get_config() return config, input_ids, input_mask, token_labels def _lowerCAmelCase ( self ): return BertGenerationConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, ) def _lowerCAmelCase ( self ): ( ( A ) , ( A ) , ( A ) , ( A ) , ) : List[Any] = self.prepare_config_and_inputs() A : Any = True A : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ): A : str = BertGenerationEncoder(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ ) A : List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ): A : List[str] = True A : Union[str, Any] = BertGenerationEncoder(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A : Any = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, ) A : Optional[Any] = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ): A : Union[str, Any] = True A : Optional[int] = True A : Optional[int] = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval() # first forward pass A : int = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__, ) A : List[str] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size ) A : int = ids_tensor((self.batch_size, 3), vocab_size=2 ) # append to next input_ids and A : List[str] = torch.cat([input_ids, next_tokens], dim=-1 ) A : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1 ) A : List[str] = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0] A : Any = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0] # select random slice A : Any = ids_tensor((1,), output_from_past.shape[-1] ).item() A : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() A : Dict = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, ): A : Optional[int] = BertGenerationDecoder(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A : List[str] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCAmelCase ( self ): A , A , A , A : str = self.prepare_config_and_inputs() A : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () __lowerCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else () __lowerCamelCase : List[Any] = ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() else {} ) def _lowerCAmelCase ( self ): A : Any = BertGenerationEncoderTester(self ) A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 ) def _lowerCAmelCase ( self ): self.config_tester.run_common_tests() def _lowerCAmelCase ( self ): A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _lowerCAmelCase ( self ): A , A , A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs() A : Any = """bert""" self.model_tester.create_and_check_model(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ ) def _lowerCAmelCase ( self ): # This regression test was failing with PyTorch < 1.3 ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() A : int = None self.model_tester.create_and_check_model_as_decoder( lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ) def _lowerCAmelCase ( self ): A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ ) @slow def _lowerCAmelCase ( self ): A : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) self.assertIsNotNone(lowerCamelCase__ ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): A : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) A : Optional[int] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): A : Union[str, Any] = model(lowerCamelCase__ )[0] A : List[Any] = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape, lowerCamelCase__ ) A : Tuple = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): A : Optional[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) A : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): A : Dict = model(lowerCamelCase__ )[0] A : List[str] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape, lowerCamelCase__ ) A : Optional[Any] = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
662
0
"""simple docstring""" import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('''.''') def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ): lowerCAmelCase = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ' F'{test_file} instead.' ) lowerCAmelCase = components[-1] if not test_fn.endswith('py' ): raise ValueError(F'`test_file` should be a python file. Got {test_fn} instead.' ) if not test_fn.startswith('test_modeling_' ): raise ValueError( F'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.' ) lowerCAmelCase = components[:-1] + [test_fn.replace('.py' , '' )] lowerCAmelCase = '.'.join(_UpperCAmelCase ) return test_module_path def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple ): lowerCAmelCase = get_module_path(_UpperCAmelCase ) lowerCAmelCase = importlib.import_module(_UpperCAmelCase ) return test_module def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ): lowerCAmelCase = [] lowerCAmelCase = get_test_module(_UpperCAmelCase ) for attr in dir(_UpperCAmelCase ): if attr.endswith('ModelTester' ): tester_classes.append(getattr(_UpperCAmelCase , _UpperCAmelCase ) ) # sort with class names return sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x.__name__ ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ): lowerCAmelCase = [] lowerCAmelCase = get_test_module(_UpperCAmelCase ) for attr in dir(_UpperCAmelCase ): lowerCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). lowerCAmelCase = getattr(_UpperCAmelCase , 'all_model_classes' , [] ) if len(_UpperCAmelCase ) > 0: test_classes.append(_UpperCAmelCase ) # sort with class names return sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x.__name__ ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ): lowerCAmelCase = get_test_classes(_UpperCAmelCase ) lowerCAmelCase = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x.__name__ ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ): lowerCAmelCase = test_class() if hasattr(_UpperCAmelCase , 'setUp' ): test.setUp() lowerCAmelCase = None if hasattr(_UpperCAmelCase , 'model_tester' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. if test.model_tester is not None: lowerCAmelCase = test.model_tester.__class__ return model_tester def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ): lowerCAmelCase = get_test_classes(_UpperCAmelCase ) lowerCAmelCase = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(_UpperCAmelCase ) # sort with class names return sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x.__name__ ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : int ): lowerCAmelCase = get_test_classes_for_model(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = [] for test_class in test_classes: lowerCAmelCase = get_model_tester_from_test_class(_UpperCAmelCase ) if tester_class is not None: tester_classes.append(_UpperCAmelCase ) # sort with class names return sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x.__name__ ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): lowerCAmelCase = get_test_classes(_UpperCAmelCase ) lowerCAmelCase = {test_class: get_model_tester_from_test_class(_UpperCAmelCase ) for test_class in test_classes} return test_tester_mapping def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ): lowerCAmelCase = get_model_classes(_UpperCAmelCase ) lowerCAmelCase = { model_class: get_test_classes_for_model(_UpperCAmelCase , _UpperCAmelCase ) for model_class in model_classes } return model_test_mapping def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple ): lowerCAmelCase = get_model_classes(_UpperCAmelCase ) lowerCAmelCase = { model_class: get_tester_classes_for_model(_UpperCAmelCase , _UpperCAmelCase ) for model_class in model_classes } return model_to_tester_mapping def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): return o elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): return o.__name__ elif isinstance(_UpperCAmelCase , (list, tuple) ): return [to_json(_UpperCAmelCase ) for x in o] elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): return {to_json(_UpperCAmelCase ): to_json(_UpperCAmelCase ) for k, v in o.items()} else: return o
4
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : str = ["pixel_values"] def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384} A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ ) A : Optional[Any] = do_resize A : Dict = size # Default value set here for backwards compatibility where the value in config is None A : Dict = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : List[str] = do_rescale A : Tuple = rescale_factor A : Optional[int] = do_normalize A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ): A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ ) if "shortest_edge" not in size: raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' ) A : List[str] = size["""shortest_edge"""] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : int = int(shortest_edge / crop_pct ) A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ ) A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ ) else: # warping (no cropping) when evaluated at 384 or larger return resize( lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ): A : Dict = do_resize if do_resize is not None else self.do_resize A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct A : str = resample if resample is not None else self.resample A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor A : Dict = do_normalize if do_normalize is not None else self.do_normalize A : List[str] = image_mean if image_mean is not None else self.image_mean A : Optional[Any] = image_std if image_std is not None else self.image_std A : Optional[Any] = size if size is not None else self.size A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ ) A : Any = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("""crop_pct must be specified if size < 384.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images] if do_rescale: A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images] if do_normalize: A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images] A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images] A : Dict = {"""pixel_values""": images} return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
662
0
'''simple docstring''' import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' _lowercase : str = VideoToVideoSDPipeline _lowercase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {'''image''', '''width''', '''height'''} _lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {'''image'''} _lowercase : Tuple = PipelineTesterMixin.required_optional_params - {'''latents'''} _lowercase : Any = False # No `output_type`. _lowercase : Tuple = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def _lowercase ( self ): """simple docstring""" torch.manual_seed(0 ) _lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , ) _lowerCAmelCase = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , ) torch.manual_seed(0 ) _lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) _lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , ) _lowerCAmelCase = CLIPTextModel(_lowercase ) _lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def _lowercase ( self , _lowercase , _lowercase=0 ): """simple docstring""" _lowerCAmelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase ) if str(_lowercase ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(_lowercase ) else: _lowerCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) _lowerCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """video""": video, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = VideoToVideoSDPipeline(**_lowercase ) _lowerCAmelCase = sd_pipe.to(_lowercase ) sd_pipe.set_progress_bar_config(disable=_lowercase ) _lowerCAmelCase = self.get_dummy_inputs(_lowercase ) _lowerCAmelCase = """np""" _lowerCAmelCase = sd_pipe(**_lowercase ).frames _lowerCAmelCase = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) _lowerCAmelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _lowercase ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_lowercase , expected_max_diff=5e-3 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def _lowercase ( self ): """simple docstring""" pass def _lowercase ( self ): """simple docstring""" return super().test_progress_bar() @slow @skip_mps class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames _lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _lowerCAmelCase = torch.randn((1, 10, 3, 1_024, 576) , generator=_lowercase ) _lowerCAmelCase = video.to("""cuda""" ) _lowerCAmelCase = """Spiderman is surfing""" _lowerCAmelCase = pipe(_lowercase , video=_lowercase , generator=_lowercase , num_inference_steps=3 , output_type="""pt""" ).frames _lowerCAmelCase = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
5
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any: """simple docstring""" A : Dict = """backbone.""" if is_semantic else """""" A : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (f'''{prefix}cls_token''', """beit.embeddings.cls_token"""), (f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""), (f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""), (f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ("""mask_token""", """beit.embeddings.mask_token"""), ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) else: # layernorm + classification head rename_keys.extend( [ ("""fc_norm.weight""", """beit.pooler.layernorm.weight"""), ("""fc_norm.bias""", """beit.pooler.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any: """simple docstring""" for i in range(config.num_hidden_layers ): A : Dict = """backbone.""" if is_semantic else """""" # queries, keys and values A : Union[str, Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' ) A : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' ) A : Optional[int] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' ) A : int = in_proj_weight[ : config.hidden_size, : ] A : Any = q_bias A : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A : Tuple = in_proj_weight[ -config.hidden_size :, : ] A : Union[str, Any] = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained A : str = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' ) A : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' ) A : Dict = gamma_a A : Dict = gamma_a def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: """simple docstring""" A : List[str] = dct.pop(_lowerCAmelCase ) A : Optional[Any] = val def __UpperCamelCase ( ) -> List[str]: """simple docstring""" A : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" A : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> str: """simple docstring""" A : Dict = False if """rvlcdip""" in checkpoint_url else True A : Union[str, Any] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: A : Dict = 1024 A : List[Any] = 4096 A : int = 24 A : int = 16 # labels if "rvlcdip" in checkpoint_url: A : List[Any] = 16 A : List[Any] = """huggingface/label-files""" A : int = """rvlcdip-id2label.json""" A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) A : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} A : int = idalabel A : Union[str, Any] = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys A : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""] A : str = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase ) # load HuggingFace model A : Any = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase ) model.eval() model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image A : Any = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase ) A : int = prepare_img() A : Tuple = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ) A : str = encoding["""pixel_values"""] A : Tuple = model(_lowerCAmelCase ) A : Optional[int] = outputs.logits # verify logits A : Tuple = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected" Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowerCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_lowerCAmelCase ) if push_to_hub: if has_lm_head: A : Any = """dit-base""" if """base""" in checkpoint_url else """dit-large""" else: A : List[Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip""" image_processor.push_to_hub( repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , ) model.push_to_hub( repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_:Optional[int] = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""", type=str, help="""URL to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", ) SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
662
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _lowerCamelCase = { 'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = [ 'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST', 'FalconForCausalLM', 'FalconModel', 'FalconPreTrainedModel', 'FalconForSequenceClassification', 'FalconForTokenClassification', 'FalconForQuestionAnswering', ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys _lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ): warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""", lowerCamelCase__, ) super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
662
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json''' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : str = '''speech_to_text''' UpperCAmelCase : List[Any] = ['''past_key_values'''] UpperCAmelCase : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : int , _UpperCAmelCase : Union[str, Any]=10_000 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : int=2_048 , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Tuple=2_048 , _UpperCAmelCase : str=4 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Union[str, Any]="relu" , _UpperCAmelCase : List[Any]=256 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Tuple=0 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : List[str]=6_000 , _UpperCAmelCase : Optional[Any]=1_024 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Any=(5, 5) , _UpperCAmelCase : int=1_024 , _UpperCAmelCase : str=80 , _UpperCAmelCase : Any=1 , **_UpperCAmelCase : Tuple , ): _A = vocab_size _A = d_model _A = encoder_ffn_dim _A = encoder_layers _A = encoder_attention_heads _A = decoder_ffn_dim _A = decoder_layers _A = decoder_attention_heads _A = dropout _A = attention_dropout _A = activation_dropout _A = activation_function _A = init_std _A = encoder_layerdrop _A = decoder_layerdrop _A = use_cache _A = encoder_layers _A = scale_embedding # scale factor will be sqrt(d_model) if True _A = max_source_positions _A = max_target_positions _A = num_conv_layers _A = list(_UpperCAmelCase ) _A = conv_channels _A = input_feat_per_channel _A = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` ' F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' F'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
7
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__( lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, ) A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths} A : str = Text( cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, ) def _lowerCAmelCase ( self ): # Build iterable dataset if self.streaming: A : int = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A : List[str] = None A : Dict = None A : Tuple = None A : Tuple = None self.builder.download_and_prepare( download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, ) A : List[str] = self.builder.as_dataset( split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory ) return dataset
662
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : List[Any] = logging.get_logger(__name__) lowercase__ : Optional[Any] = { '''microsoft/swinv2-tiny-patch4-window8-256''': ( '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json''' ), } class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = '''swinv2''' lowerCAmelCase = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=4 , _UpperCAmelCase=3 , _UpperCAmelCase=96 , _UpperCAmelCase=[2, 2, 6, 2] , _UpperCAmelCase=[3, 6, 12, 24] , _UpperCAmelCase=7 , _UpperCAmelCase=4.0 , _UpperCAmelCase=True , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase="gelu" , _UpperCAmelCase=False , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=32 , **_UpperCAmelCase , ): '''simple docstring''' super().__init__(**_UpperCAmelCase) __A : Dict = image_size __A : Optional[int] = patch_size __A : int = num_channels __A : Tuple = embed_dim __A : Dict = depths __A : str = len(_UpperCAmelCase) __A : int = num_heads __A : Optional[int] = window_size __A : int = mlp_ratio __A : Optional[Any] = qkv_bias __A : Dict = hidden_dropout_prob __A : Union[str, Any] = attention_probs_dropout_prob __A : Any = drop_path_rate __A : List[Any] = hidden_act __A : Optional[Any] = use_absolute_embeddings __A : List[Any] = layer_norm_eps __A : Union[str, Any] = initializer_range __A : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __A : List[str] = int(embed_dim * 2 ** (len(_UpperCAmelCase) - 1)) __A : Dict = (0, 0, 0, 0)
8
from typing import TYPE_CHECKING from ....utils import _LazyModule SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]} if TYPE_CHECKING: from .tokenization_tapex import TapexTokenizer else: import sys SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
662
0
import os import numpy import onnx def A ( __UpperCamelCase , __UpperCamelCase ) -> int: A__ = a.name A__ = b.name A__ = '' A__ = '' A__ = a == b A__ = name_a A__ = name_b return res def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(__UpperCamelCase , __UpperCamelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , __UpperCamelCase , __UpperCamelCase ) _graph_replace_input_with(node_proto.attribute[1].g , __UpperCamelCase , __UpperCamelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , __UpperCamelCase , __UpperCamelCase ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict: for n in graph_proto.node: _node_replace_input_with(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: A__ = list(model.graph.initializer ) A__ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i A__ = inits[i].name A__ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , __UpperCamelCase , __UpperCamelCase ) def A ( __UpperCamelCase ) -> Tuple: A__ = os.path.dirname(__UpperCamelCase ) A__ = os.path.basename(__UpperCamelCase ) A__ = onnx.load(os.path.join(__UpperCamelCase , __UpperCamelCase ) ) A__ = list(model.graph.initializer ) A__ = set() A__ = {} A__ = [] A__ = 0 for i in range(len(__UpperCamelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(__UpperCamelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(__UpperCamelCase ) dup_set.add(__UpperCamelCase ) A__ = inits[j].data_type A__ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , __UpperCamelCase ) total_reduced_size += mem_size A__ = inits[i].name A__ = inits[j].name if name_i in dup_map: dup_map[name_i].append(__UpperCamelCase ) else: A__ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1_024 / 1_024 / 1_024 , 'GB' ) A__ = sorted(__UpperCamelCase ) _remove_dup_initializers_from_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A__ = 'optimized_' + model_file_name A__ = os.path.join(__UpperCamelCase , __UpperCamelCase ) onnx.save(__UpperCamelCase , __UpperCamelCase ) return new_model
9
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int: """simple docstring""" A , A : str = 1, 1 A : List[Any] = [] for i in range(1 , n + 1 ): A : Optional[int] = prev_numerator + 2 * prev_denominator A : Any = prev_numerator + prev_denominator if len(str(_lowerCAmelCase ) ) > len(str(_lowerCAmelCase ) ): result.append(_lowerCAmelCase ) A : int = numerator A : int = denominator return len(_lowerCAmelCase ) if __name__ == "__main__": print(F"""{solution() = }""")
662
0
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = (UnCLIPScheduler,) def UpperCamelCase_ ( self : Any , **_A : int ): _UpperCamelCase = { '''num_train_timesteps''': 1000, '''variance_type''': '''fixed_small_log''', '''clip_sample''': True, '''clip_sample_range''': 1.0, '''prediction_type''': '''epsilon''', } config.update(**_A ) return config def UpperCamelCase_ ( self : Tuple ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def UpperCamelCase_ ( self : int ): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=_A ) def UpperCamelCase_ ( self : Tuple ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=_A ) def UpperCamelCase_ ( self : Optional[Any] ): for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=_A ) def UpperCamelCase_ ( self : Union[str, Any] ): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=_A ) def UpperCamelCase_ ( self : str ): for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=_A , prev_timestep=_A ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config(variance_type='''fixed_small_log''' ) _UpperCamelCase = scheduler_class(**_A ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5 def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config(variance_type='''learned_range''' ) _UpperCamelCase = scheduler_class(**_A ) _UpperCamelCase = 0.5 assert scheduler._get_variance(1 , predicted_variance=_A ) - -10.171_2790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=_A ) - -5.799_8052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=_A ) - -0.001_0011 < 1e-5 def UpperCamelCase_ ( self : str ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A ) _UpperCamelCase = scheduler.timesteps _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter _UpperCamelCase = torch.manual_seed(0 ) for i, t in enumerate(_A ): # 1. predict noise residual _UpperCamelCase = model(_A , _A ) # 2. predict previous mean of sample x_t-1 _UpperCamelCase = scheduler.step(_A , _A , _A , generator=_A ).prev_sample _UpperCamelCase = pred_prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 252.268_2495 ) < 1e-2 assert abs(result_mean.item() - 0.328_4743 ) < 1e-3 def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(25 ) _UpperCamelCase = scheduler.timesteps _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter _UpperCamelCase = torch.manual_seed(0 ) for i, t in enumerate(_A ): # 1. predict noise residual _UpperCamelCase = model(_A , _A ) if i + 1 == timesteps.shape[0]: _UpperCamelCase = None else: _UpperCamelCase = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 _UpperCamelCase = scheduler.step( _A , _A , _A , prev_timestep=_A , generator=_A ).prev_sample _UpperCamelCase = pred_prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 258.204_4983 ) < 1e-2 assert abs(result_mean.item() - 0.336_2038 ) < 1e-3 def UpperCamelCase_ ( self : Any ): pass def UpperCamelCase_ ( self : Dict ): pass
10
import re def __UpperCamelCase ( _lowerCAmelCase ) -> str: """simple docstring""" if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ): raise ValueError("""Invalid Strand""" ) return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) ) if __name__ == "__main__": import doctest doctest.testmod()
662
0
'''simple docstring''' import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib lowercase_ = threading.Lock() lowercase_ = None lowercase_ = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } lowercase_ = logging.WARNING lowercase_ = True def lowerCAmelCase (): """simple docstring""" _a = os.getenv('''TRANSFORMERS_VERBOSITY''' , __A) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( F'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, ''' F'''has to be one of: { ', '.join(log_levels.keys()) }''') return _default_log_level def lowerCAmelCase (): """simple docstring""" return __name__.split('''.''')[0] def lowerCAmelCase (): """simple docstring""" return logging.getLogger(_get_library_name()) def lowerCAmelCase (): """simple docstring""" global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return _a = logging.StreamHandler() # Set sys.stderr as stream. _a = sys.stderr.flush # Apply our default configuration to the library root logger. _a = _get_library_root_logger() library_root_logger.addHandler(_default_handler) library_root_logger.setLevel(_get_default_logging_level()) _a = False def lowerCAmelCase (): """simple docstring""" global _default_handler with _lock: if not _default_handler: return _a = _get_library_root_logger() library_root_logger.removeHandler(_default_handler) library_root_logger.setLevel(logging.NOTSET) _a = None def lowerCAmelCase (): """simple docstring""" return log_levels def lowerCAmelCase (__A = None): """simple docstring""" if name is None: _a = _get_library_name() _configure_library_root_logger() return logging.getLogger(__A) def lowerCAmelCase (): """simple docstring""" _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def lowerCAmelCase (__A): """simple docstring""" _configure_library_root_logger() _get_library_root_logger().setLevel(__A) def lowerCAmelCase (): """simple docstring""" return set_verbosity(__A) def lowerCAmelCase (): """simple docstring""" return set_verbosity(__A) def lowerCAmelCase (): """simple docstring""" return set_verbosity(__A) def lowerCAmelCase (): """simple docstring""" return set_verbosity(__A) def lowerCAmelCase (): """simple docstring""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler) def lowerCAmelCase (): """simple docstring""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler) def lowerCAmelCase (__A): """simple docstring""" _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(__A) def lowerCAmelCase (__A): """simple docstring""" _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(__A) def lowerCAmelCase (): """simple docstring""" _configure_library_root_logger() _a = False def lowerCAmelCase (): """simple docstring""" _configure_library_root_logger() _a = True def lowerCAmelCase (): """simple docstring""" _a = _get_library_root_logger().handlers for handler in handlers: _a = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''') handler.setFormatter(__A) def lowerCAmelCase (): """simple docstring""" _a = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(__A) def lowerCAmelCase (self , *__A , **__A): """simple docstring""" _a = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , __A) if no_advisory_warnings: return self.warning(*__A , **__A) lowercase_ = warning_advice @functools.lru_cache(__A) def lowerCAmelCase (self , *__A , **__A): """simple docstring""" self.warning(*__A , **__A) lowercase_ = warning_once class __A : '''simple docstring''' def __init__(self , *A , **A ) -> str: # pylint: disable=unused-argument """simple docstring""" _a = args[0] if args else None def __iter__(self ) -> Optional[int]: """simple docstring""" return iter(self._iterator ) def __getattr__(self , A ) -> Optional[int]: """simple docstring""" def empty_fn(*A , **A ): # pylint: disable=unused-argument return return empty_fn def __enter__(self ) -> List[Any]: """simple docstring""" return self def __exit__(self , A , A , A ) -> Union[str, Any]: """simple docstring""" return class __A : '''simple docstring''' def __call__(self , *A , **A ) -> Optional[Any]: """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm(*A , **A ) else: return EmptyTqdm(*A , **A ) def a__ (self , *A , **A ) -> Optional[int]: """simple docstring""" _a = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*A , **A ) def a__ (self ) -> Optional[int]: """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm.get_lock() lowercase_ = _tqdm_cls() def lowerCAmelCase (): """simple docstring""" global _tqdm_active return bool(_tqdm_active) def lowerCAmelCase (): """simple docstring""" global _tqdm_active _a = True hf_hub_utils.enable_progress_bars() def lowerCAmelCase (): """simple docstring""" global _tqdm_active _a = False hf_hub_utils.disable_progress_bars()
11
from __future__ import annotations SCREAMING_SNAKE_CASE_:Tuple = """#""" class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self ): A : dict = {} def _lowerCAmelCase ( self, lowerCamelCase__ ): A : List[Any] = self._trie for char in text: if char not in trie: A : str = {} A : str = trie[char] A : Optional[int] = True def _lowerCAmelCase ( self, lowerCamelCase__ ): A : Dict = self._trie for char in prefix: if char in trie: A : Optional[Any] = trie[char] else: return [] return self._elements(lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__ ): A : int = [] for c, v in d.items(): A : List[Any] = [""" """] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )] result.extend(lowerCamelCase__ ) return tuple(lowerCamelCase__ ) SCREAMING_SNAKE_CASE_:Any = Trie() SCREAMING_SNAKE_CASE_:Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""") for word in words: trie.insert_word(word) def __UpperCamelCase ( _lowerCAmelCase ) -> tuple: """simple docstring""" A : List[str] = trie.find_word(_lowerCAmelCase ) return tuple(string + word for word in suffixes ) def __UpperCamelCase ( ) -> None: """simple docstring""" print(autocomplete_using_trie("""de""" ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
662
0
from __future__ import annotations from typing import TypedDict class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : str __lowerCAmelCase : int def UpperCamelCase ( lowercase_ ) -> list[str]: '''simple docstring''' if not isinstance(lowercase_ , lowercase_ ): raise TypeError("""The parameter s type must be str.""" ) return [s[i:] + s[:i] for i in range(len(lowercase_ ) )] def UpperCamelCase ( lowercase_ ) -> BWTTransformDict: '''simple docstring''' if not isinstance(lowercase_ , lowercase_ ): raise TypeError("""The parameter s type must be str.""" ) if not s: raise ValueError("""The parameter s must not be empty.""" ) lowercase__ : List[str] = all_rotations(lowercase_ ) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation lowercase__ : BWTTransformDict = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(lowercase_ ), } return response def UpperCamelCase ( lowercase_ , lowercase_ ) -> str: '''simple docstring''' if not isinstance(lowercase_ , lowercase_ ): raise TypeError("""The parameter bwt_string type must be str.""" ) if not bwt_string: raise ValueError("""The parameter bwt_string must not be empty.""" ) try: lowercase__ : Optional[Any] = int(lowercase_ ) except ValueError: raise TypeError( """The parameter idx_original_string type must be int or passive""" """ of cast to int.""" ) if idx_original_string < 0: raise ValueError("""The parameter idx_original_string must not be lower than 0.""" ) if idx_original_string >= len(lowercase_ ): raise ValueError( """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" ) lowercase__ : str = [""""""] * len(lowercase_ ) for _ in range(len(lowercase_ ) ): for i in range(len(lowercase_ ) ): lowercase__ : List[Any] = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": lowerCamelCase__ : Tuple = """Provide a string that I will generate its BWT transform: """ lowerCamelCase__ : Dict = input(entry_msg).strip() lowerCamelCase__ : int = bwt_transform(s) print( f'''Burrows Wheeler transform for string \'{s}\' results ''' f'''in \'{result["bwt_string"]}\'''' ) lowerCamelCase__ : List[str] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""]) print( f'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' ''' f'''we get original string \'{original_string}\'''' )
12
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> Union[str, Any]: """simple docstring""" A : Optional[int] = bnb_quantization_config.load_in_abit A : int = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) A : Any = [] # custom device map if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1: A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: A : int = get_keys_to_not_convert(_lowerCAmelCase ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_lowerCAmelCase ) A : Optional[Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: A : Dict = [] A : Tuple = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_lowerCAmelCase ) # compatibility with peft A : Union[str, Any] = load_in_abit A : Tuple = load_in_abit A : List[str] = get_parameter_device(_lowerCAmelCase ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. """ """The model should be instantiated under the `init_empty_weights` context manager.""" ) A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase ) # convert param to the right dtype A : Tuple = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_lowerCAmelCase ): param.to(_lowerCAmelCase ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): A : str = replace_with_bnb_layers( _lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase ) A : Optional[Any] = get_quantized_model_device_map( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): A : Tuple = True A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase ) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[int]: """simple docstring""" if device_map is None: if torch.cuda.is_available(): A : Optional[int] = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) A : Tuple = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) A : Any = {} A : List[str] = special_dtypes A : Any = no_split_module_classes A : Union[str, Any] = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": A : Tuple = get_balanced_memory( _lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , ) A : int = max_memory A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ): # check if don't have any quantized module on the cpu A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules A : Optional[int] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]: """simple docstring""" if modules_to_not_convert is None: A : Optional[Any] = [] A , A : Dict = _replace_with_bnb_layers( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> int: """simple docstring""" A : Optional[int] = False for name, module in model.named_children(): if current_key_name is None: A : int = [] current_key_name.append(_lowerCAmelCase ) if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` A : Dict = """.""".join(_lowerCAmelCase ) A : Optional[Any] = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: A : Dict = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: A : Optional[Any] = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: A : Dict = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) A : Any = module.weight.data if module.bias is not None: A : Any = module.bias.data bnb_module.requires_grad_(_lowerCAmelCase ) setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) A : Dict = True if len(list(module.children() ) ) > 0: A , A : Dict = _replace_with_bnb_layers( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) A : Union[str, Any] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def __UpperCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: """simple docstring""" with init_empty_weights(): A : Tuple = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` A : Optional[int] = find_tied_parameters(_lowerCAmelCase ) # For compatibility with Accelerate < 0.18 if isinstance(_lowerCAmelCase , _lowerCAmelCase ): A : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A : Optional[int] = sum(_lowerCAmelCase , [] ) A : Tuple = len(_lowerCAmelCase ) > 0 # Check if it is a base model A : List[str] = False if hasattr(_lowerCAmelCase , """base_model_prefix""" ): A : Optional[Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A : str = list(model.named_children() ) A : Tuple = [list_modules[-1][0]] # add last module together with tied weights A : int = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) A : Optional[Any] = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase ) # remove ".weight" from the keys A : Union[str, Any] = [""".weight""", """.bias"""] A : Optional[int] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A : List[str] = name.replace(_lowerCAmelCase , """""" ) filtered_module_names.append(_lowerCAmelCase ) return filtered_module_names def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]: """simple docstring""" for m in model.modules(): if isinstance(_lowerCAmelCase , bnb.nn.Linearabit ): return True return False def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]: """simple docstring""" return next(parameter.parameters() ).device def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase ) A : Tuple = param_name A : Union[str, Any] = model if "." in tensor_name: A : int = tensor_name.split(""".""" ) for split in splits[:-1]: A : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) A : Optional[Any] = new_module A : List[str] = splits[-1] # offload weights A : Optional[int] = False offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase , ) else: offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase ) offload_weight(_lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase ) set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , """meta""" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
662
0
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : str = logging.get_logger(__name__) A__ : int = { """asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""", # See all SEW models at https://huggingface.co/models?filter=sew } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Dict = 'sew' def __init__( self , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_="group" , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , SCREAMING_SNAKE_CASE_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1_28 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0_5 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_="mean" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=2_56 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=2 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple: super().__init__(**SCREAMING_SNAKE_CASE_ , pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = hidden_size __lowerCamelCase : int = feat_extract_norm __lowerCamelCase : Optional[int] = feat_extract_activation __lowerCamelCase : Any = list(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = list(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = list(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = conv_bias __lowerCamelCase : Dict = num_conv_pos_embeddings __lowerCamelCase : Optional[Any] = num_conv_pos_embedding_groups __lowerCamelCase : Dict = len(self.conv_dim ) __lowerCamelCase : Optional[Any] = num_hidden_layers __lowerCamelCase : Tuple = intermediate_size __lowerCamelCase : List[Any] = squeeze_factor __lowerCamelCase : List[str] = hidden_act __lowerCamelCase : Dict = num_attention_heads __lowerCamelCase : Dict = hidden_dropout __lowerCamelCase : Tuple = attention_dropout __lowerCamelCase : Dict = activation_dropout __lowerCamelCase : Optional[int] = feat_proj_dropout __lowerCamelCase : Tuple = final_dropout __lowerCamelCase : str = layerdrop __lowerCamelCase : int = layer_norm_eps __lowerCamelCase : int = initializer_range __lowerCamelCase : Optional[int] = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase : Optional[Any] = apply_spec_augment __lowerCamelCase : Tuple = mask_time_prob __lowerCamelCase : Any = mask_time_length __lowerCamelCase : int = mask_time_min_masks __lowerCamelCase : int = mask_feature_prob __lowerCamelCase : Dict = mask_feature_length __lowerCamelCase : List[str] = mask_feature_min_masks # ctc loss __lowerCamelCase : Any = ctc_loss_reduction __lowerCamelCase : List[Any] = ctc_zero_infinity # sequence classification __lowerCamelCase : List[Any] = use_weighted_layer_sum __lowerCamelCase : Union[str, Any] = classifier_proj_size @property def lowercase_ ( self ) -> List[Any]: return functools.reduce(operator.mul , self.conv_stride , 1 )
13
from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def __UpperCamelCase ( ) -> Dict: """simple docstring""" A : Tuple = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" ) A : Dict = parser.add_subparsers(help="""transformers-cli command helpers""" ) # Register commands ConvertCommand.register_subcommand(_lowerCAmelCase ) DownloadCommand.register_subcommand(_lowerCAmelCase ) EnvironmentCommand.register_subcommand(_lowerCAmelCase ) RunCommand.register_subcommand(_lowerCAmelCase ) ServeCommand.register_subcommand(_lowerCAmelCase ) UserCommands.register_subcommand(_lowerCAmelCase ) AddNewModelCommand.register_subcommand(_lowerCAmelCase ) AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase ) LfsCommands.register_subcommand(_lowerCAmelCase ) PTtoTFCommand.register_subcommand(_lowerCAmelCase ) # Let's go A : Tuple = parser.parse_args() if not hasattr(_lowerCAmelCase , """func""" ): parser.print_help() exit(1 ) # Run A : Any = args.func(_lowerCAmelCase ) service.run() if __name__ == "__main__": main()
662
0
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets a__ = datasets.logging.get_logger(__name__) a__ = '''\ @InProceedings{moosavi2019minimum, author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube}, title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection}, year = {2019}, booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, publisher = {Association for Computational Linguistics}, address = {Florence, Italy}, } @inproceedings{10.3115/1072399.1072405, author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette}, title = {A Model-Theoretic Coreference Scoring Scheme}, year = {1995}, isbn = {1558604022}, publisher = {Association for Computational Linguistics}, address = {USA}, url = {https://doi.org/10.3115/1072399.1072405}, doi = {10.3115/1072399.1072405}, booktitle = {Proceedings of the 6th Conference on Message Understanding}, pages = {45–52}, numpages = {8}, location = {Columbia, Maryland}, series = {MUC6 ’95} } @INPROCEEDINGS{Bagga98algorithmsfor, author = {Amit Bagga and Breck Baldwin}, title = {Algorithms for Scoring Coreference Chains}, booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference}, year = {1998}, pages = {563--566} } @INPROCEEDINGS{Luo05oncoreference, author = {Xiaoqiang Luo}, title = {On coreference resolution performance metrics}, booktitle = {In Proc. of HLT/EMNLP}, year = {2005}, pages = {25--32}, publisher = {URL} } @inproceedings{moosavi-strube-2016-coreference, title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric", author = "Moosavi, Nafise Sadat and Strube, Michael", booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", month = aug, year = "2016", address = "Berlin, Germany", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/P16-1060", doi = "10.18653/v1/P16-1060", pages = "632--642", } ''' a__ = '''\ CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which implements of the common evaluation metrics including MUC [Vilain et al, 1995], B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005], LEA [Moosavi and Strube, 2016] and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) [Denis and Baldridge, 2009a; Pradhan et al., 2011]. This wrapper of CoVal currently only work with CoNLL line format: The CoNLL format has one word per line with all the annotation for this word in column separated by spaces: Column Type Description 1 Document ID This is a variation on the document filename 2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. 3 Word number 4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. 5 Part-of-Speech 6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column. 7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-" 8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7. 9 Word sense This is the word sense of the word in Column 3. 10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. 11 Named Entities These columns identifies the spans representing various named entities. 12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7. N Coreference Coreference chain information encoded in a parenthesis structure. More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md CoVal code was written by @ns-moosavi. Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py The test suite is taken from https://github.com/conll/reference-coreference-scorers/ Mention evaluation and the test suite are added by @andreasvc. Parsing CoNLL files is developed by Leo Born. ''' a__ = ''' Calculates coreference evaluation metrics. Args: predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format. Each prediction is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format. Each reference is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. keep_singletons: After extracting all mentions of key or system files, mentions whose corresponding coreference chain is of size one, are considered as singletons. The default evaluation mode will include singletons in evaluations if they are included in the key or the system files. By setting \'keep_singletons=False\', all singletons in the key and system files will be excluded from the evaluation. NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs. min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the MINA algorithm. Returns: \'mentions\': mentions \'muc\': MUC metric [Vilain et al, 1995] \'bcub\': B-cubed [Bagga and Baldwin, 1998] \'ceafe\': CEAFe [Luo et al., 2005] \'lea\': LEA [Moosavi and Strube, 2016] \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) Examples: >>> coval = datasets.load_metric(\'coval\') >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\', ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\', ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\', ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\', ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\', ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\'] >>> references = [words] >>> predictions = [words] >>> results = coval.compute(predictions=predictions, references=references) >>> print(results) # doctest:+ELLIPSIS {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0} ''' def __UpperCAmelCase ( __a : Optional[int] ,__a : Tuple ,__a : str=False ,__a : Dict=False ,__a : Any=True ,__a : int=False ,__a : Union[str, Any]="dummy_doc" ) -> List[Any]: """simple docstring""" _a : int = {doc: key_lines} _a : Dict = {doc: sys_lines} _a : Tuple = {} _a : Dict = 0 _a : List[str] = 0 _a : List[Any] = 0 _a : Dict = 0 _a : int = 0 _a : int = 0 _a , _a : Optional[Any] = reader.get_doc_mentions(__a ,key_doc_lines[doc] ,__a ) key_singletons_num += singletons_num if NP_only or min_span: _a : Optional[int] = reader.set_annotated_parse_trees(__a ,key_doc_lines[doc] ,__a ,__a ) _a , _a : List[Any] = reader.get_doc_mentions(__a ,sys_doc_lines[doc] ,__a ) sys_singletons_num += singletons_num if NP_only or min_span: _a : Any = reader.set_annotated_parse_trees(__a ,key_doc_lines[doc] ,__a ,__a ) if remove_nested: _a , _a : str = reader.remove_nested_coref_mentions(__a ,__a ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _a , _a : Optional[Any] = reader.remove_nested_coref_mentions(__a ,__a ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _a : Dict = reader.get_mention_assignments(__a ,__a ) _a : List[str] = reader.get_mention_assignments(__a ,__a ) _a : List[str] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( '''Number of removed nested coreferring mentions in the key ''' F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" ) logger.info( '''Number of resulting singleton clusters in the key ''' F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" ) if not keep_singletons: logger.info( F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """ '''files, respectively''' ) return doc_coref_infos def __UpperCAmelCase ( __a : Any ,__a : List[str] ,__a : Optional[Any] ,__a : Any ,__a : Optional[int] ,__a : List[str] ,__a : int ) -> List[Any]: """simple docstring""" _a : Optional[int] = get_coref_infos(__a ,__a ,__a ,__a ,__a ,__a ) _a : List[str] = {} _a : Union[str, Any] = 0 _a : Union[str, Any] = 0 for name, metric in metrics: _a , _a , _a : int = evaluator.evaluate_documents(__a ,__a ,beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": fa} ) logger.info( name.ljust(10 ) ,F"""Recall: {recall * 100:.2f}""" ,F""" Precision: {precision * 100:.2f}""" ,F""" F1: {fa * 100:.2f}""" ,) if conll_subparts_num == 3: _a : int = (conll / 3) * 100 logger.info(F"""CoNLL score: {conll:.2f}""" ) output_scores.update({'''conll_score''': conll} ) return output_scores def __UpperCAmelCase ( __a : int ) -> List[Any]: """simple docstring""" _a : List[Any] = False for line in key_lines: if not line.startswith('''#''' ): if len(line.split() ) > 6: _a : Any = line.split()[5] if not parse_col == "-": _a : Any = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): """simple docstring""" def __lowercase ( self ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ), '''references''': datasets.Sequence(datasets.Value('''string''' ) ), } ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[ '''https://github.com/ns-moosavi/coval''', '''https://www.aclweb.org/anthology/P16-1060''', '''http://www.conll.cemantix.org/2012/data.html''', ] , ) def __lowercase ( self , _a , _a , _a=True , _a=False , _a=False , _a=False ) -> Any: _a : List[Any] = [ ('''mentions''', evaluator.mentions), ('''muc''', evaluator.muc), ('''bcub''', evaluator.b_cubed), ('''ceafe''', evaluator.ceafe), ('''lea''', evaluator.lea), ] if min_span: _a : Any = util.check_gold_parse_annotation(_a ) if not has_gold_parse: raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _a : Union[str, Any] = evaluate( key_lines=_a , sys_lines=_a , metrics=_a , NP_only=_a , remove_nested=_a , keep_singletons=_a , min_span=_a , ) return score
14
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE_:int = { """configuration_blenderbot""": [ """BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlenderbotConfig""", """BlenderbotOnnxConfig""", ], """tokenization_blenderbot""": ["""BlenderbotTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Optional[int] = [ """BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlenderbotForCausalLM""", """BlenderbotForConditionalGeneration""", """BlenderbotModel""", """BlenderbotPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Union[str, Any] = [ """TFBlenderbotForConditionalGeneration""", """TFBlenderbotModel""", """TFBlenderbotPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Any = [ """FlaxBlenderbotForConditionalGeneration""", """FlaxBlenderbotModel""", """FlaxBlenderbotPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
662
0
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A : '''simple docstring''' def __init__(self : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str]=13 , _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : Optional[int]=[10, 20, 30, 40] , _UpperCAmelCase : str=[2, 2, 3, 2] , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : List[str]=37 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Dict=10 , _UpperCAmelCase : Optional[Any]=0.02 , _UpperCAmelCase : int=["stage2", "stage3", "stage4"] , _UpperCAmelCase : List[Any]=[2, 3, 4] , _UpperCAmelCase : int=None , ) -> List[str]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = num_channels lowercase__ = num_stages lowercase__ = hidden_sizes lowercase__ = depths lowercase__ = is_training lowercase__ = use_labels lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = num_labels lowercase__ = initializer_range lowercase__ = out_features lowercase__ = out_indices lowercase__ = scope def lowerCamelCase__ (self : int ) -> Union[str, Any]: """simple docstring""" lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.num_labels ) lowercase__ = self.get_config() return config, pixel_values, labels def lowerCamelCase__ (self : Any ) -> Dict: """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int ) -> Union[str, Any]: """simple docstring""" lowercase__ = ConvNextModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__ = model(_UpperCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase__ (self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ = ConvNextForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__ = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ = ConvNextBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__ = model(_UpperCAmelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowercase__ = None lowercase__ = ConvNextBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__ = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase__ (self : str ) -> List[str]: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ = config_and_inputs lowercase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' A__ = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) A__ = ( {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification} if is_torch_available() else {} ) A__ = True A__ = False A__ = False A__ = False A__ = False def lowerCamelCase__ (self : int ) -> Any: """simple docstring""" lowercase__ = ConvNextModelTester(self ) lowercase__ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 ) def lowerCamelCase__ (self : Optional[Any] ) -> int: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase__ (self : Tuple ) -> Union[str, Any]: """simple docstring""" return @unittest.skip(reason="""ConvNext does not use inputs_embeds""" ) def lowerCamelCase__ (self : Optional[int] ) -> Any: """simple docstring""" pass @unittest.skip(reason="""ConvNext does not support input and output embeddings""" ) def lowerCamelCase__ (self : Dict ) -> str: """simple docstring""" pass @unittest.skip(reason="""ConvNext does not use feedforward chunking""" ) def lowerCamelCase__ (self : Optional[Any] ) -> Any: """simple docstring""" pass def lowerCamelCase__ (self : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def lowerCamelCase__ (self : Optional[Any] ) -> int: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def lowerCamelCase__ (self : Optional[Any] ) -> Optional[int]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCAmelCase ) def lowerCamelCase__ (self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" def check_hidden_states_output(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ): lowercase__ = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): lowercase__ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ = self.model_tester.num_stages self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def lowerCamelCase__ (self : int ) -> Dict: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def lowerCamelCase__ (self : Tuple ) -> Optional[int]: """simple docstring""" for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = ConvNextModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def UpperCamelCase ( ) -> List[Any]: """simple docstring""" lowercase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class A ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase__ (self : Any ) -> Dict: """simple docstring""" return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None @slow def lowerCamelCase__ (self : Tuple ) -> Tuple: """simple docstring""" lowercase__ = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_UpperCAmelCase ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): lowercase__ = model(**_UpperCAmelCase ) # verify the logits lowercase__ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) lowercase__ = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) ) @require_torch class A ( unittest.TestCase , UpperCAmelCase__ ): '''simple docstring''' A__ = (ConvNextBackbone,) if is_torch_available() else () A__ = ConvNextConfig A__ = False def lowerCamelCase__ (self : Any ) -> List[Any]: """simple docstring""" lowercase__ = ConvNextModelTester(self )
15
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list[int]: """simple docstring""" A : Optional[int] = int(_lowerCAmelCase ) # Initialize Result A : int = [] # Traverse through all denomination for denomination in reversed(_lowerCAmelCase ): # Find denominations while int(_lowerCAmelCase ) >= int(_lowerCAmelCase ): total_value -= int(_lowerCAmelCase ) answer.append(_lowerCAmelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": SCREAMING_SNAKE_CASE_:List[Any] = [] SCREAMING_SNAKE_CASE_:Dict = """0""" if ( input("""Do you want to enter your denominations ? (yY/n): """).strip().lower() == "y" ): SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the number of denominations you want to add: """).strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip() else: # All denominations of Indian Currency if user does not enter SCREAMING_SNAKE_CASE_:Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2_000] SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make: """).strip() if int(value) == 0 or int(value) < 0: print("""The total value cannot be zero or negative.""") else: print(F"""Following is minimal change for {value}: """) SCREAMING_SNAKE_CASE_:str = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=""" """)
662
0
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = DebertaTokenizer lowerCamelCase__ = True lowerCamelCase__ = DebertaTokenizerFast def _snake_case ( self : int ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]", ] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] SCREAMING_SNAKE_CASE = {"unk_token": "[UNK]"} SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__lowerCamelCase ) ) def _snake_case ( self : int , **__lowerCamelCase : Optional[Any] ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def _snake_case ( self : str , __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = "lower newer" return input_text, output_text def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = tokenizer("Hello" , "World" ) SCREAMING_SNAKE_CASE = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd["token_type_ids"] , __lowerCamelCase ) @slow def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("microsoft/deberta-base" ) SCREAMING_SNAKE_CASE = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.encode( "sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained("microsoft/deberta-base" ) SCREAMING_SNAKE_CASE = [ "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", "ALBERT incorporates two parameter reduction techniques", "The first one is a factorized embedding parameterization. By decomposing the large vocabulary" " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of" " vocabulary embedding.", ] SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , padding=__lowerCamelCase ) SCREAMING_SNAKE_CASE = [tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) for seq in encoding["input_ids"]] # fmt: off SCREAMING_SNAKE_CASE = { "input_ids": [ [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2] ], "token_type_ids": [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on SCREAMING_SNAKE_CASE = [ "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", "ALBERT incorporates two parameter reduction techniques", "The first one is a factorized embedding parameterization. By decomposing the large vocabulary" " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of" " vocabulary embedding.", ] self.assertDictEqual(encoding.data , __lowerCamelCase ) for expected, decoded in zip(__lowerCamelCase , __lowerCamelCase ): self.assertEqual(__lowerCamelCase , __lowerCamelCase )
16
# This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru""" # Build # borrowed from a test SCREAMING_SNAKE_CASE_:Union[str, Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] SCREAMING_SNAKE_CASE_:Any = dict(zip(vocab, range(len(vocab)))) SCREAMING_SNAKE_CASE_:Dict = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE_:List[Any] = Path(tmpdirname) SCREAMING_SNAKE_CASE_:str = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""] SCREAMING_SNAKE_CASE_:Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""] SCREAMING_SNAKE_CASE_:Any = build_dir / VOCAB_FILES_NAMES["""merges_file"""] with open(src_vocab_file, """w""") as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, """w""") as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, """w""") as fp: fp.write("""\n""".join(merges)) SCREAMING_SNAKE_CASE_:Optional[int] = FSMTTokenizer( langs=["""en""", """ru"""], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) SCREAMING_SNAKE_CASE_:Optional[int] = FSMTConfig( langs=["""ru""", """en"""], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) SCREAMING_SNAKE_CASE_:Optional[Any] = FSMTForConditionalGeneration(config) print(F"""num of params {tiny_model.num_parameters()}""") # Test SCREAMING_SNAKE_CASE_:Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""") SCREAMING_SNAKE_CASE_:str = tiny_model(**batch) print("""test output:""", len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F"""Generated {mname_tiny}""") # Upload # transformers-cli upload tiny-wmt19-en-ru
662
0
from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : Tuple = { '''Visual-Attention-Network/van-base''': ( '''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json''' ), } class lowerCamelCase_ ( _lowercase ): _lowercase : Dict = '''van''' def __init__( self : Dict , __A : List[str]=224 , __A : Any=3 , __A : Any=[7, 3, 3, 3] , __A : List[str]=[4, 2, 2, 2] , __A : Optional[Any]=[64, 128, 320, 512] , __A : Tuple=[3, 3, 12, 3] , __A : Optional[Any]=[8, 8, 4, 4] , __A : List[Any]="gelu" , __A : Optional[int]=0.0_2 , __A : Any=1e-6 , __A : str=1e-2 , __A : Union[str, Any]=0.0 , __A : str=0.0 , **__A : Dict , ): super().__init__(**__A ) __A : List[str] = image_size __A : List[str] = num_channels __A : Tuple = patch_sizes __A : Optional[Any] = strides __A : List[str] = hidden_sizes __A : Optional[int] = depths __A : Union[str, Any] = mlp_ratios __A : List[str] = hidden_act __A : List[str] = initializer_range __A : Tuple = layer_norm_eps __A : Optional[Any] = layer_scale_init_value __A : List[Any] = drop_path_rate __A : int = dropout_rate
17
import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("""0.12.2"""): raise Exception("""requires fairseq >= 0.12.2""") if version.parse(fairseq.__version__) > version.parse("""2"""): raise Exception("""requires fairseq < v2""") logging.set_verbosity_info() SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_:int = """Hello, World!""" SCREAMING_SNAKE_CASE_:List[Any] = """en_XX""" def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any: """simple docstring""" A : Optional[int] = Path("""data_bin""" ) A : Optional[Any] = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowerCAmelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , ) xmod.eval() # disable dropout print(_lowerCAmelCase ) A : Any = xmod.model.encoder.sentence_encoder A : Optional[int] = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0] print("""Our X-MOD config:""" , _lowerCAmelCase ) A : int = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase ) model.eval() # Now let's copy all the weights. # Embeddings A : Any = xmod_sent_encoder.embed_tokens.weight A : int = xmod_sent_encoder.embed_positions.weight A : str = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. A : Dict = xmod_sent_encoder.layernorm_embedding.weight A : int = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer A : str = model.roberta.encoder.layer[i] A : Tuple = xmod_sent_encoder.layers[i] # self attention A : Optional[int] = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError("""Dimensions of self-attention weights do not match.""" ) A : List[str] = xmod_layer.self_attn.q_proj.weight A : Optional[int] = xmod_layer.self_attn.q_proj.bias A : List[Any] = xmod_layer.self_attn.k_proj.weight A : Union[str, Any] = xmod_layer.self_attn.k_proj.bias A : Optional[int] = xmod_layer.self_attn.v_proj.weight A : Dict = xmod_layer.self_attn.v_proj.bias # self-attention output A : Optional[Any] = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError("""Dimensions of self-attention output weights do not match.""" ) A : Optional[Any] = xmod_layer.self_attn.out_proj.weight A : Dict = xmod_layer.self_attn.out_proj.bias A : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight A : str = xmod_layer.self_attn_layer_norm.bias # intermediate A : str = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("""Dimensions of intermediate weights do not match.""" ) A : Optional[int] = xmod_layer.fca.weight A : Optional[int] = xmod_layer.fca.bias # output A : Dict = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("""Dimensions of feed-forward weights do not match.""" ) A : Union[str, Any] = xmod_layer.fca.weight A : int = xmod_layer.fca.bias A : List[str] = xmod_layer.final_layer_norm.weight A : Optional[Any] = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: A : str = xmod_layer.adapter_layer_norm.weight A : str = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError("""Lists of language adapters do not match.""" ) for lang_code, adapter in xmod_layer.adapter_modules.items(): A : Optional[int] = bert_output.adapter_modules[lang_code] A : int = xmod_layer.adapter_modules[lang_code] A : Optional[Any] = from_adapter.fca.weight A : Optional[Any] = from_adapter.fca.bias A : List[str] = from_adapter.fca.weight A : Any = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: A : Dict = xmod_sent_encoder.layer_norm.weight A : int = xmod_sent_encoder.layer_norm.bias if classification_head: A : int = xmod.model.classification_heads["""mnli"""].dense.weight A : Optional[Any] = xmod.model.classification_heads["""mnli"""].dense.bias A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.bias else: # LM Head A : Any = xmod.model.encoder.lm_head.dense.weight A : Tuple = xmod.model.encoder.lm_head.dense.bias A : Any = xmod.model.encoder.lm_head.layer_norm.weight A : List[str] = xmod.model.encoder.lm_head.layer_norm.bias A : Union[str, Any] = xmod.model.encoder.lm_head.weight A : Tuple = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. A : Optional[int] = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(_lowerCAmelCase ) A : List[str] = model(_lowerCAmelCase )[0] if classification_head: A : Dict = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowerCAmelCase ) ) else: A : Optional[Any] = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) A : str = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 A : Optional[Any] = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" ) if not success: raise Exception("""Something went wRoNg""" ) Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_:Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head.""" ) SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
662
0
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[str] = (DPMSolverSinglestepScheduler,) __lowerCamelCase : int = (("num_inference_steps", 25),) def _snake_case ( self , **_lowerCAmelCase ) -> Any: _lowerCAmelCase = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "prediction_type": "epsilon", "thresholding": False, "sample_max_value": 1.0, "algorithm_type": "dpmsolver++", "solver_type": "midpoint", "lambda_min_clipped": -float("inf" ), "variance_type": None, } config.update(**_lowerCAmelCase ) return config def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> List[Any]: _lowerCAmelCase = dict(self.forward_default_kwargs ) _lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase ) _lowerCAmelCase = self.dummy_sample _lowerCAmelCase = 0.1 * sample _lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals _lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCAmelCase ) _lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase ) new_scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals _lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _lowerCAmelCase , _lowerCAmelCase = sample, sample for t in range(_lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ): _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample _lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _snake_case ( self ) -> int: pass def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> Optional[int]: _lowerCAmelCase = dict(self.forward_default_kwargs ) _lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase ) _lowerCAmelCase = self.dummy_sample _lowerCAmelCase = 0.1 * sample _lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) _lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCAmelCase ) _lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residual (must be after setting timesteps) _lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample _lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _snake_case ( self , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Tuple: if scheduler is None: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = 10 _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(_lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample return sample def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _lowerCAmelCase = 50 _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(_lowerCAmelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2574 ) < 1E-3 def _snake_case ( self ) -> Optional[Any]: for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> List[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 _lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def _snake_case ( self ) -> str: self.check_over_configs(thresholding=_lowerCAmelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , algorithm_type="dpmsolver++" , solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , ) def _snake_case ( self ) -> Dict: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , ) _lowerCAmelCase = self.full_loop( solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , ) assert not torch.isnan(_lowerCAmelCase ).any(), "Samples have nan numbers" def _snake_case ( self ) -> Optional[Any]: self.check_over_configs(lower_order_final=_lowerCAmelCase ) self.check_over_configs(lower_order_final=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[Any]: self.check_over_configs(lambda_min_clipped=-float("inf" ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def _snake_case ( self ) -> str: self.check_over_configs(variance_type=_lowerCAmelCase ) self.check_over_configs(variance_type="learned_range" ) def _snake_case ( self ) -> int: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=_lowerCAmelCase , time_step=0 ) def _snake_case ( self ) -> Any: _lowerCAmelCase = self.full_loop() _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def _snake_case ( self ) -> List[str]: _lowerCAmelCase = self.full_loop(use_karras_sigmas=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2248 ) < 1E-3 def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.full_loop(prediction_type="v_prediction" ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.1453 ) < 1E-3 def _snake_case ( self ) -> Any: _lowerCAmelCase = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=_lowerCAmelCase ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.0649 ) < 1E-3 def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(thresholding=_lowerCAmelCase , dynamic_thresholding_ratio=0 ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = 10 _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(_lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample assert sample.dtype == torch.floataa
18
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ): A : Any = tempfile.mkdtemp() A : List[str] = BlipImageProcessor() A : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) A : str = BlipProcessor(lowerCamelCase__, lowerCamelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowerCAmelCase ( self, **lowerCamelCase__ ): return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).tokenizer def _lowerCAmelCase ( self, **lowerCamelCase__ ): return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).image_processor def _lowerCAmelCase ( self ): shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self ): A : Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )] A : Any = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs] return image_inputs def _lowerCAmelCase ( self ): A : int = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A : Any = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" ) A : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase__, padding_value=1.0 ) A : Dict = BlipProcessor.from_pretrained( self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer, lowerCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : Optional[int] = self.get_image_processor() A : str = self.get_tokenizer() A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Any = self.prepare_image_inputs() A : int = image_processor(lowerCamelCase__, return_tensors="""np""" ) A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 ) def _lowerCAmelCase ( self ): A : List[str] = self.get_image_processor() A : int = self.get_tokenizer() A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[Any] = """lower newer""" A : List[Any] = processor(text=lowerCamelCase__ ) A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key] ) def _lowerCAmelCase ( self ): A : List[Any] = self.get_image_processor() A : Dict = self.get_tokenizer() A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[int] = """lower newer""" A : Union[str, Any] = self.prepare_image_inputs() A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ ) self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(lowerCamelCase__ ): processor() def _lowerCAmelCase ( self ): A : List[Any] = self.get_image_processor() A : Dict = self.get_tokenizer() A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A : Optional[int] = processor.batch_decode(lowerCamelCase__ ) A : Dict = tokenizer.batch_decode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : Optional[int] = self.get_image_processor() A : int = self.get_tokenizer() A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[int] = """lower newer""" A : List[str] = self.prepare_image_inputs() A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
662
0
"""simple docstring""" # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
19
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ): return f'''gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase__ ) for s in shape] )}.npy''' def _lowerCAmelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 4, 64, 64), lowerCamelCase__=False ): A : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa A : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ ) return image def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__="CompVis/stable-diffusion-v1-4" ): A : str = jnp.bfloataa if fpaa else jnp.floataa A : Union[str, Any] = """bf16""" if fpaa else None A , A : str = FlaxUNetaDConditionModel.from_pretrained( lowerCamelCase__, subfolder="""unet""", dtype=lowerCamelCase__, revision=lowerCamelCase__ ) return model, params def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 77, 768), lowerCamelCase__=False ): A : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa A : List[str] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ): A , A : List[str] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=lowerCamelCase__ ) A : str = self.get_latents(lowerCamelCase__, fpaa=lowerCamelCase__ ) A : int = self.get_encoder_hidden_states(lowerCamelCase__, fpaa=lowerCamelCase__ ) A : Optional[Any] = model.apply( {"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample assert sample.shape == latents.shape A : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa ) A : Dict = jnp.array(lowerCamelCase__, dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ): A , A : Tuple = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=lowerCamelCase__ ) A : int = self.get_latents(lowerCamelCase__, shape=(4, 4, 96, 96), fpaa=lowerCamelCase__ ) A : Union[str, Any] = self.get_encoder_hidden_states(lowerCamelCase__, shape=(4, 77, 1024), fpaa=lowerCamelCase__ ) A : Dict = model.apply( {"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample assert sample.shape == latents.shape A : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa ) A : List[Any] = jnp.array(lowerCamelCase__, dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
662
0
def _lowercase( __a : int ): a__ =len(__a ) a__ =sum(__a ) a__ =[[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): a__ =True for i in range(1 , s + 1 ): a__ =False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): a__ =dp[i][j - 1] if arr[i - 1] <= j: a__ =dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: a__ =s - 2 * j break return diff
20
from typing import Any import numpy as np def __UpperCamelCase ( _lowerCAmelCase ) -> bool: """simple docstring""" return np.array_equal(_lowerCAmelCase , matrix.conjugate().T ) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: """simple docstring""" A : Any = v.conjugate().T A : List[Any] = v_star.dot(_lowerCAmelCase ) assert isinstance(_lowerCAmelCase , np.ndarray ) return (v_star_dot.dot(_lowerCAmelCase )) / (v_star.dot(_lowerCAmelCase )) def __UpperCamelCase ( ) -> None: """simple docstring""" A : Any = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) A : str = np.array([[1], [2], [3]] ) assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.''' print(rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) ) A : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.''' assert rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
662
0
import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class __A ( unittest.TestCase ): def A__ ( self :Tuple ): '''simple docstring''' debug_launcher(test_script.main ) def A__ ( self :Dict ): '''simple docstring''' debug_launcher(test_ops.main )
21
from __future__ import annotations import numpy as np def __UpperCamelCase ( _lowerCAmelCase ) -> tuple[np.ndarray, np.ndarray]: """simple docstring""" A , A : int = np.shape(_lowerCAmelCase ) if rows != columns: A : Union[str, Any] = ( """'table' has to be of square shaped array but got a """ f'''{rows}x{columns} array:\n{table}''' ) raise ValueError(_lowerCAmelCase ) A : Union[str, Any] = np.zeros((rows, columns) ) A : Dict = np.zeros((rows, columns) ) for i in range(_lowerCAmelCase ): for j in range(_lowerCAmelCase ): A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) ) if upper[j][j] == 0: raise ArithmeticError("""No LU decomposition exists""" ) A : Any = (table[i][j] - total) / upper[j][j] A : Union[str, Any] = 1 for j in range(_lowerCAmelCase , _lowerCAmelCase ): A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) ) A : str = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
662
0
'''simple docstring''' import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL _snake_case : List[Any] = logging.get_logger(__name__) def snake_case_ (UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, Iterable[int]] , UpperCamelCase : bool , UpperCamelCase : int ): '''simple docstring''' def constraint_to_multiple_of(UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[Any]=0 , UpperCamelCase : Tuple=None ): _a = round(val / multiple ) * multiple if max_val is not None and x > max_val: _a = math.floor(val / multiple ) * multiple if x < min_val: _a = math.ceil(val / multiple ) * multiple return x _a = (output_size, output_size) if isinstance(UpperCamelCase , UpperCamelCase ) else output_size _a , _a = get_image_size(UpperCamelCase ) _a , _a = output_size # determine new height and width _a = output_height / input_height _a = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _a = scale_width else: # fit height _a = scale_height _a = constraint_to_multiple_of(scale_height * input_height , multiple=UpperCamelCase ) _a = constraint_to_multiple_of(scale_width * input_width , multiple=UpperCamelCase ) return (new_height, new_width) class A ( _a ): lowercase_ = ['pixel_values'] def __init__( self : int , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : List[Any] , ) -> None: """simple docstring""" super().__init__(**lowerCAmelCase_ ) _a = size if size is not None else {'''height''': 3_84, '''width''': 3_84} _a = get_size_dict(lowerCAmelCase_ ) _a = do_resize _a = size _a = keep_aspect_ratio _a = ensure_multiple_of _a = resample _a = do_rescale _a = rescale_factor _a = do_normalize _a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' ) _a = get_resize_output_image_size( lowerCAmelCase_ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=lowerCAmelCase_ , multiple=lowerCAmelCase_ , ) return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Any , ) -> List[Any]: """simple docstring""" return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple , ) -> np.ndarray: """simple docstring""" return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : float = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> PIL.Image.Image: """simple docstring""" _a = do_resize if do_resize is not None else self.do_resize _a = size if size is not None else self.size _a = get_size_dict(lowerCAmelCase_ ) _a = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _a = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _a = resample if resample is not None else self.resample _a = do_rescale if do_rescale is not None else self.do_rescale _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = do_normalize if do_normalize is not None else self.do_normalize _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = make_list_of_images(lowerCAmelCase_ ) if not valid_images(lowerCAmelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. _a = [to_numpy_array(lowerCAmelCase_ ) for image in images] if do_resize: _a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images] if do_rescale: _a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images] if do_normalize: _a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images] _a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images] _a = {'''pixel_values''': images} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Tuple] = None ) -> Union[str, Any]: """simple docstring""" _a = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(lowerCAmelCase_ ): _a = target_sizes.numpy() _a = [] for idx in range(len(lowerCAmelCase_ ) ): _a = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase_ ) _a = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCAmelCase_ ) else: _a = logits.argmax(dim=1 ) _a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
22
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple[int, int]: """simple docstring""" def constraint_to_multiple_of(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0 , _lowerCAmelCase=None ): A : Optional[int] = round(val / multiple ) * multiple if max_val is not None and x > max_val: A : Optional[Any] = math.floor(val / multiple ) * multiple if x < min_val: A : Any = math.ceil(val / multiple ) * multiple return x A : Optional[Any] = (output_size, output_size) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else output_size A , A : List[Any] = get_image_size(_lowerCAmelCase ) A , A : List[Any] = output_size # determine new height and width A : Optional[int] = output_height / input_height A : Optional[Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width A : Any = scale_width else: # fit height A : int = scale_height A : Any = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCAmelCase ) A : int = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCAmelCase ) return (new_height, new_width) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : Optional[int] = ["pixel_values"] def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) A : int = size if size is not None else {"""height""": 384, """width""": 384} A : str = get_size_dict(lowerCamelCase__ ) A : Optional[Any] = do_resize A : Optional[int] = size A : Union[str, Any] = keep_aspect_ratio A : int = ensure_multiple_of A : Dict = resample A : Optional[Any] = do_rescale A : Any = rescale_factor A : str = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ): A : Dict = get_size_dict(lowerCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) A : Optional[Any] = get_resize_output_image_size( lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, ) return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ): A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize A : str = size if size is not None else self.size A : str = get_size_dict(lowerCamelCase__ ) A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of A : Tuple = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : int = rescale_factor if rescale_factor is not None else self.rescale_factor A : int = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : Optional[int] = image_std if image_std is not None else self.image_std A : Any = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. A : str = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images] if do_rescale: A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images] if do_normalize: A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images] A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images] A : Optional[int] = {"""pixel_values""": images} return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ): A : Any = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCamelCase__ ) != len(lowerCamelCase__ ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(lowerCamelCase__ ): A : int = target_sizes.numpy() A : Union[str, Any] = [] for idx in range(len(lowerCamelCase__ ) ): A : int = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ ) A : Tuple = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCamelCase__ ) else: A : List[str] = logits.argmax(dim=1 ) A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
662
0
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case__ : Dict = 1_6 snake_case__ : List[str] = 3_2 def _snake_case (__lowercase , __lowercase = 16): UpperCamelCase_ = AutoTokenizer.from_pretrained('bert-base-cased') UpperCamelCase_ = load_dataset('glue' , 'mrpc') def tokenize_function(__lowercase): # max_length=None => use the model max length (it's actually the default) UpperCamelCase_ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__lowercase , max_length=__lowercase) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCamelCase_ = datasets.map( __lowercase , batched=__lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCamelCase_ = tokenized_datasets.rename_column('label' , 'labels') def collate_fn(__lowercase): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCamelCase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCamelCase_ = 16 elif accelerator.mixed_precision != "no": UpperCamelCase_ = 8 else: UpperCamelCase_ = None return tokenizer.pad( __lowercase , padding='longest' , max_length=__lowercase , pad_to_multiple_of=__lowercase , return_tensors='pt' , ) # Instantiate dataloaders. UpperCamelCase_ = DataLoader( tokenized_datasets['train'] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=__lowercase) UpperCamelCase_ = DataLoader( tokenized_datasets['validation'] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=__lowercase) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders snake_case__ : List[str] = mocked_dataloaders # noqa: F811 def _snake_case (__lowercase , __lowercase): # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS' , __lowercase) == "1": UpperCamelCase_ = 2 # New Code # UpperCamelCase_ = int(args.gradient_accumulation_steps) # Initialize accelerator UpperCamelCase_ = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__lowercase) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( 'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`') # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCamelCase_ = config['lr'] UpperCamelCase_ = int(config['num_epochs']) UpperCamelCase_ = int(config['seed']) UpperCamelCase_ = int(config['batch_size']) UpperCamelCase_ = evaluate.load('glue' , 'mrpc') set_seed(__lowercase) UpperCamelCase_ , UpperCamelCase_ = get_dataloaders(__lowercase , __lowercase) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCamelCase_ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__lowercase) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCamelCase_ = model.to(accelerator.device) # Instantiate optimizer UpperCamelCase_ = AdamW(params=model.parameters() , lr=__lowercase) # Instantiate scheduler UpperCamelCase_ = get_linear_schedule_with_warmup( optimizer=__lowercase , num_warmup_steps=100 , num_training_steps=(len(__lowercase) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) # Now we train the model for epoch in range(__lowercase): model.train() for step, batch in enumerate(__lowercase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(__lowercase): UpperCamelCase_ = model(**__lowercase) UpperCamelCase_ = output.loss accelerator.backward(__lowercase) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__lowercase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): UpperCamelCase_ = model(**__lowercase) UpperCamelCase_ = outputs.logits.argmax(dim=-1) UpperCamelCase_ , UpperCamelCase_ = accelerator.gather_for_metrics((predictions, batch['labels'])) metric.add_batch( predictions=__lowercase , references=__lowercase , ) UpperCamelCase_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , __lowercase) def _snake_case (): UpperCamelCase_ = argparse.ArgumentParser(description='Simple example of training script.') parser.add_argument( '--mixed_precision' , type=__lowercase , default=__lowercase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) # New Code # parser.add_argument( '--gradient_accumulation_steps' , type=__lowercase , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.') UpperCamelCase_ = parser.parse_args() UpperCamelCase_ = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(__lowercase , __lowercase) if __name__ == "__main__": main()
23
class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self, lowerCamelCase__ ): # we need a list not a string, so do something to change the type A : List[Any] = arr.split(""",""" ) def _lowerCAmelCase ( self ): A : int = [int(self.array[0] )] * len(self.array ) A : Optional[Any] = [int(self.array[0] )] * len(self.array ) for i in range(1, len(self.array ) ): A : Union[str, Any] = max( int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) ) A : Dict = max(sum_value[i], rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": SCREAMING_SNAKE_CASE_:int = input("""please input some numbers:""") SCREAMING_SNAKE_CASE_:Dict = SubArray(whole_array) SCREAMING_SNAKE_CASE_:Optional[int] = array.solve_sub_array() print(("""the results is:""", re))
662
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) def _UpperCamelCase (_lowerCamelCase : Optional[int] )-> int: '''simple docstring''' __snake_case = SwinConfig( embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , ) __snake_case = DetaConfig( backbone_config=_lowerCamelCase , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=_lowerCamelCase , with_box_refine=_lowerCamelCase , two_stage=_lowerCamelCase , ) # set labels __snake_case = '''huggingface/label-files''' if "o365" in model_name: __snake_case = 3_66 __snake_case = '''object365-id2label.json''' else: __snake_case = 91 __snake_case = '''coco-detection-id2label.json''' __snake_case = num_labels __snake_case = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type='''dataset''' ) ) , '''r''' ) ) __snake_case = {int(_lowerCamelCase ): v for k, v in idalabel.items()} __snake_case = idalabel __snake_case = {v: k for k, v in idalabel.items()} return config def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Optional[Any]: '''simple docstring''' __snake_case = [] # stem # fmt: off rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') ) rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') ) rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') ) rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') ) rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') ) rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') ) rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') ) # fmt: on return rename_keys def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Any )-> List[Any]: '''simple docstring''' __snake_case = dct.pop(_lowerCamelCase ) __snake_case = val def _UpperCamelCase (_lowerCamelCase : Optional[int] , _lowerCamelCase : Dict )-> Union[str, Any]: '''simple docstring''' __snake_case = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): __snake_case = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' ) __snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict __snake_case = in_proj_weight[:dim, :] __snake_case = in_proj_bias[: dim] __snake_case = in_proj_weight[ dim : dim * 2, : ] __snake_case = in_proj_bias[ dim : dim * 2 ] __snake_case = in_proj_weight[ -dim :, : ] __snake_case = in_proj_bias[-dim :] # fmt: on def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] )-> Any: '''simple docstring''' __snake_case = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention __snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) __snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict __snake_case = in_proj_weight[:hidden_size, :] __snake_case = in_proj_bias[:hidden_size] __snake_case = in_proj_weight[ hidden_size : hidden_size * 2, : ] __snake_case = in_proj_bias[hidden_size : hidden_size * 2] __snake_case = in_proj_weight[-hidden_size:, :] __snake_case = in_proj_bias[-hidden_size:] def _UpperCamelCase ()-> Optional[Any]: '''simple docstring''' __snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return im @torch.no_grad() def _UpperCamelCase (_lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] )-> Optional[Any]: '''simple docstring''' __snake_case = get_deta_config(_lowerCamelCase ) # load original state dict if model_name == "deta-swin-large": __snake_case = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' ) elif model_name == "deta-swin-large-o365": __snake_case = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' ) else: raise ValueError(f'''Model name {model_name} not supported''' ) __snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )['''model'''] # original state dict for name, param in state_dict.items(): print(_lowerCamelCase , param.shape ) # rename keys __snake_case = create_rename_keys(_lowerCamelCase ) for src, dest in rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) read_in_swin_q_k_v(_lowerCamelCase , config.backbone_config ) read_in_decoder_q_k_v(_lowerCamelCase , _lowerCamelCase ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: __snake_case = state_dict.pop(_lowerCamelCase ) __snake_case = val if "input_proj" in key: __snake_case = state_dict.pop(_lowerCamelCase ) __snake_case = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: __snake_case = state_dict.pop(_lowerCamelCase ) __snake_case = val # finally, create HuggingFace model and load state dict __snake_case = DetaForObjectDetection(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() __snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu''' model.to(_lowerCamelCase ) # load image processor __snake_case = DetaImageProcessor(format='''coco_detection''' ) # verify our conversion on image __snake_case = prepare_img() __snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ) __snake_case = encoding['''pixel_values'''] __snake_case = model(pixel_values.to(_lowerCamelCase ) ) # verify logits print('''Logits:''' , outputs.logits[0, :3, :3] ) print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": __snake_case = torch.tensor( [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] ) __snake_case = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] ) elif model_name == "deta-swin-large-o365": __snake_case = torch.tensor( [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] ) __snake_case = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_lowerCamelCase ) , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_lowerCamelCase ) , atol=1E-4 ) print('''Everything ok!''' ) if pytorch_dump_folder_path: # Save model and processor logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) # Push to hub if push_to_hub: print('''Pushing model and processor to hub...''' ) model.push_to_hub(f'''jozhang97/{model_name}''' ) processor.push_to_hub(f'''jozhang97/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser() parser.add_argument( '''--model_name''', type=str, default='''deta-swin-large''', choices=['''deta-swin-large''', '''deta-swin-large-o365'''], help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) UpperCAmelCase_ : Optional[int] = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
24
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_:List[Any] = { """google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : Optional[Any] = "bit" __lowerCamelCase : Union[str, Any] = ["preactivation", "bottleneck"] __lowerCamelCase : Union[str, Any] = ["SAME", "VALID"] def __init__( self, lowerCamelCase__=3, lowerCamelCase__=64, lowerCamelCase__=[256, 512, 1024, 2048], lowerCamelCase__=[3, 4, 6, 3], lowerCamelCase__="preactivation", lowerCamelCase__="relu", lowerCamelCase__=None, lowerCamelCase__=32, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=1, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) if layer_type not in self.layer_types: raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' ) if global_padding is not None: if global_padding.upper() in self.supported_padding: A : List[Any] = global_padding.upper() else: raise ValueError(f'''Padding strategy {global_padding} not supported''' ) A : Dict = num_channels A : List[Any] = embedding_size A : Optional[Any] = hidden_sizes A : str = depths A : str = layer_type A : Union[str, Any] = hidden_act A : Any = global_padding A : Optional[int] = num_groups A : Dict = drop_path_rate A : List[Any] = embedding_dynamic_padding A : List[Any] = output_stride A : Union[str, Any] = width_factor A : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )] A , A : Any = get_aligned_output_features_output_indices( out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
662
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = { 'configuration_clipseg': [ 'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CLIPSegConfig', 'CLIPSegTextConfig', 'CLIPSegVisionConfig', ], 'processing_clipseg': ['CLIPSegProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST', 'CLIPSegModel', 'CLIPSegPreTrainedModel', 'CLIPSegTextModel', 'CLIPSegVisionModel', 'CLIPSegForImageSegmentation', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
25
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=50, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=None, ): A : List[str] = parent A : List[str] = batch_size A : Optional[int] = seq_length A : Optional[int] = is_training A : Tuple = use_input_mask A : Optional[Any] = vocab_size A : str = hidden_size A : Any = num_hidden_layers A : List[Any] = num_attention_heads A : Optional[int] = intermediate_size A : int = hidden_act A : Dict = hidden_dropout_prob A : Optional[Any] = attention_probs_dropout_prob A : List[Any] = max_position_embeddings A : int = initializer_range A : Tuple = use_labels A : List[str] = scope def _lowerCAmelCase ( self ): A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) A : int = None if self.use_input_mask: A : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) A : List[Any] = self.get_config() return config, input_ids, input_mask, token_labels def _lowerCAmelCase ( self ): return BertGenerationConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, ) def _lowerCAmelCase ( self ): ( ( A ) , ( A ) , ( A ) , ( A ) , ) : List[Any] = self.prepare_config_and_inputs() A : Any = True A : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ): A : str = BertGenerationEncoder(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ ) A : List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ): A : List[str] = True A : Union[str, Any] = BertGenerationEncoder(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A : Any = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, ) A : Optional[Any] = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ): A : Union[str, Any] = True A : Optional[int] = True A : Optional[int] = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval() # first forward pass A : int = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__, ) A : List[str] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size ) A : int = ids_tensor((self.batch_size, 3), vocab_size=2 ) # append to next input_ids and A : List[str] = torch.cat([input_ids, next_tokens], dim=-1 ) A : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1 ) A : List[str] = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0] A : Any = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0] # select random slice A : Any = ids_tensor((1,), output_from_past.shape[-1] ).item() A : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() A : Dict = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, ): A : Optional[int] = BertGenerationDecoder(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A : List[str] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCAmelCase ( self ): A , A , A , A : str = self.prepare_config_and_inputs() A : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () __lowerCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else () __lowerCamelCase : List[Any] = ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() else {} ) def _lowerCAmelCase ( self ): A : Any = BertGenerationEncoderTester(self ) A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 ) def _lowerCAmelCase ( self ): self.config_tester.run_common_tests() def _lowerCAmelCase ( self ): A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _lowerCAmelCase ( self ): A , A , A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs() A : Any = """bert""" self.model_tester.create_and_check_model(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ ) def _lowerCAmelCase ( self ): # This regression test was failing with PyTorch < 1.3 ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() A : int = None self.model_tester.create_and_check_model_as_decoder( lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ) def _lowerCAmelCase ( self ): A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ ) @slow def _lowerCAmelCase ( self ): A : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) self.assertIsNotNone(lowerCamelCase__ ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): A : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) A : Optional[int] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): A : Union[str, Any] = model(lowerCamelCase__ )[0] A : List[Any] = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape, lowerCamelCase__ ) A : Tuple = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): A : Optional[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) A : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): A : Dict = model(lowerCamelCase__ )[0] A : List[str] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape, lowerCamelCase__ ) A : Optional[Any] = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
662
0
'''simple docstring''' from typing import List from .keymap import KEYMAP, get_character def _a ( _lowerCamelCase ) -> int: """simple docstring""" def decorator(_lowerCamelCase ): __snake_case : str = getattr(_lowerCamelCase , """handle_key""" , [] ) handle += [key] setattr(_lowerCamelCase , """handle_key""" , _lowerCamelCase ) return func return decorator def _a ( *_lowerCamelCase ) -> str: """simple docstring""" def decorator(_lowerCamelCase ): __snake_case : List[Any] = getattr(_lowerCamelCase , """handle_key""" , [] ) handle += keys setattr(_lowerCamelCase , """handle_key""" , _lowerCamelCase ) return func return decorator class _A ( __lowercase ): def __new__( cls : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : str = super().__new__(cls , __magic_name__ , __magic_name__ , __magic_name__ ) if not hasattr(__magic_name__ , """key_handler""" ): setattr(__magic_name__ , """key_handler""" , {} ) setattr(__magic_name__ , """handle_input""" , KeyHandler.handle_input ) for value in attrs.values(): __snake_case : Optional[int] = getattr(__magic_name__ , """handle_key""" , [] ) for key in handled_keys: __snake_case : int = value return new_cls @staticmethod def lowercase__ ( cls : Any ) -> Dict: """simple docstring""" __snake_case : Optional[Any] = get_character() if char != KEYMAP["undefined"]: __snake_case : Tuple = ord(__magic_name__ ) __snake_case : List[Any] = cls.key_handler.get(__magic_name__ ) if handler: __snake_case : Any = char return handler(cls ) else: return None def _a ( cls ) -> str: """simple docstring""" return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
26
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : str = ["pixel_values"] def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384} A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ ) A : Optional[Any] = do_resize A : Dict = size # Default value set here for backwards compatibility where the value in config is None A : Dict = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : List[str] = do_rescale A : Tuple = rescale_factor A : Optional[int] = do_normalize A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ): A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ ) if "shortest_edge" not in size: raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' ) A : List[str] = size["""shortest_edge"""] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : int = int(shortest_edge / crop_pct ) A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ ) A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ ) else: # warping (no cropping) when evaluated at 384 or larger return resize( lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ): A : Dict = do_resize if do_resize is not None else self.do_resize A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct A : str = resample if resample is not None else self.resample A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor A : Dict = do_normalize if do_normalize is not None else self.do_normalize A : List[str] = image_mean if image_mean is not None else self.image_mean A : Optional[Any] = image_std if image_std is not None else self.image_std A : Optional[Any] = size if size is not None else self.size A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ ) A : Any = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("""crop_pct must be specified if size < 384.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images] if do_rescale: A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images] if do_normalize: A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images] A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images] A : Dict = {"""pixel_values""": images} return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
662
0
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCamelCase( __snake_case ): '''simple docstring''' __magic_name__ = ['image_processor', 'tokenizer'] __magic_name__ = 'ViltImageProcessor' __magic_name__ = ('BertTokenizer', 'BertTokenizerFast') def __init__( self , snake_case_=None , snake_case_=None , **snake_case_ ): _A = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , snake_case_ , ) _A = kwargs.pop('feature_extractor' ) _A = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(snake_case_ , snake_case_ ) _A = self.image_processor def __call__( self , snake_case_ , snake_case_ = None , snake_case_ = True , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = True , snake_case_ = None , **snake_case_ , ): _A = self.tokenizer( text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , ) # add pixel_values + pixel_mask _A = self.image_processor(snake_case_ , return_tensors=snake_case_ ) encoding.update(snake_case_ ) return encoding def lowerCAmelCase__ ( self , *snake_case_ , **snake_case_ ): return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ ) def lowerCAmelCase__ ( self , *snake_case_ , **snake_case_ ): return self.tokenizer.decode(*snake_case_ , **snake_case_ ) @property def lowerCAmelCase__ ( self ): _A = self.tokenizer.model_input_names _A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def lowerCAmelCase__ ( self ): warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case_ , ) return self.image_processor_class @property def lowerCAmelCase__ ( self ): warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case_ , ) return self.image_processor
27
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any: """simple docstring""" A : Dict = """backbone.""" if is_semantic else """""" A : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (f'''{prefix}cls_token''', """beit.embeddings.cls_token"""), (f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""), (f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""), (f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ("""mask_token""", """beit.embeddings.mask_token"""), ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) else: # layernorm + classification head rename_keys.extend( [ ("""fc_norm.weight""", """beit.pooler.layernorm.weight"""), ("""fc_norm.bias""", """beit.pooler.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any: """simple docstring""" for i in range(config.num_hidden_layers ): A : Dict = """backbone.""" if is_semantic else """""" # queries, keys and values A : Union[str, Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' ) A : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' ) A : Optional[int] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' ) A : int = in_proj_weight[ : config.hidden_size, : ] A : Any = q_bias A : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A : Tuple = in_proj_weight[ -config.hidden_size :, : ] A : Union[str, Any] = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained A : str = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' ) A : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' ) A : Dict = gamma_a A : Dict = gamma_a def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: """simple docstring""" A : List[str] = dct.pop(_lowerCAmelCase ) A : Optional[Any] = val def __UpperCamelCase ( ) -> List[str]: """simple docstring""" A : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" A : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> str: """simple docstring""" A : Dict = False if """rvlcdip""" in checkpoint_url else True A : Union[str, Any] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: A : Dict = 1024 A : List[Any] = 4096 A : int = 24 A : int = 16 # labels if "rvlcdip" in checkpoint_url: A : List[Any] = 16 A : List[Any] = """huggingface/label-files""" A : int = """rvlcdip-id2label.json""" A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) A : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} A : int = idalabel A : Union[str, Any] = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys A : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""] A : str = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase ) # load HuggingFace model A : Any = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase ) model.eval() model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image A : Any = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase ) A : int = prepare_img() A : Tuple = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ) A : str = encoding["""pixel_values"""] A : Tuple = model(_lowerCAmelCase ) A : Optional[int] = outputs.logits # verify logits A : Tuple = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected" Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowerCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_lowerCAmelCase ) if push_to_hub: if has_lm_head: A : Any = """dit-base""" if """base""" in checkpoint_url else """dit-large""" else: A : List[Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip""" image_processor.push_to_hub( repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , ) model.push_to_hub( repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_:Optional[int] = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""", type=str, help="""URL to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", ) SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
662
0
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging UpperCamelCase_ = logging.get_logger(__name__) def lowercase__( __UpperCamelCase: Union[tf.Tensor, np.ndarray] ): """simple docstring""" if isinstance(__UpperCamelCase ,np.ndarray ): return list(tensor.shape ) SCREAMING_SNAKE_CASE : Union[str, Any] = tf.shape(__UpperCamelCase ) if tensor.shape == tf.TensorShape(__UpperCamelCase ): return dynamic SCREAMING_SNAKE_CASE : Union[str, Any] = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(__UpperCamelCase )] def lowercase__( __UpperCamelCase: tf.Tensor ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[str] = None ): """simple docstring""" return tf.nn.softmax(logits=logits + 1e-9 ,axis=__UpperCamelCase ,name=__UpperCamelCase ) def lowercase__( __UpperCamelCase: Union[str, Any] ,__UpperCamelCase: str ,__UpperCamelCase: Tuple ,__UpperCamelCase: List[Any]=1e-5 ,__UpperCamelCase: Dict=-1 ): """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__UpperCamelCase ,__UpperCamelCase ): raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' ) # Get mean and variance on the axis to be normalized SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = tf.nn.moments(__UpperCamelCase ,axes=[axis] ,keepdims=__UpperCamelCase ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis SCREAMING_SNAKE_CASE : Any = [1] * inputs.shape.rank SCREAMING_SNAKE_CASE : str = shape_list(__UpperCamelCase )[axis] SCREAMING_SNAKE_CASE : Optional[int] = tf.reshape(__UpperCamelCase ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = tf.reshape(__UpperCamelCase ,__UpperCamelCase ) # Compute layer normalization using the batch_normalization # function. SCREAMING_SNAKE_CASE : Optional[int] = tf.nn.batch_normalization( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,offset=__UpperCamelCase ,scale=__UpperCamelCase ,variance_epsilon=__UpperCamelCase ,) return outputs def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: str=0 ,__UpperCamelCase: Optional[Any]=-1 ): """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input SCREAMING_SNAKE_CASE : Union[str, Any] = tf.shape(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) SCREAMING_SNAKE_CASE : Dict = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] ,axis=0 ) return tf.reshape(__UpperCamelCase ,__UpperCamelCase ) def lowercase__( __UpperCamelCase: tf.Tensor ): """simple docstring""" if not isinstance(__UpperCamelCase ,tf.Tensor ): SCREAMING_SNAKE_CASE : Any = tf.convert_to_tensor(__UpperCamelCase ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: SCREAMING_SNAKE_CASE : Tuple = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: SCREAMING_SNAKE_CASE : Dict = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) SCREAMING_SNAKE_CASE : Optional[int] = ( tf.cast(1 ,encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowercase__( __UpperCamelCase: tf.Tensor ,__UpperCamelCase: int ,__UpperCamelCase: str = "input_ids" ): """simple docstring""" tf.debugging.assert_less( __UpperCamelCase ,tf.cast(__UpperCamelCase ,dtype=tensor.dtype ) ,message=( f"The maximum value of {tensor_name} ({tf.math.reduce_max(__UpperCamelCase )}) must be smaller than the embedding " f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time." ) ,) def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = 6_45_12 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. SCREAMING_SNAKE_CASE : int = [x for x in data if len(__UpperCamelCase ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( 'The following attributes cannot be saved to HDF5 file because ' f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} " f"bytes: {bad_attributes}" ) SCREAMING_SNAKE_CASE : List[str] = np.asarray(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : Tuple = np.array_split(__UpperCamelCase ,__UpperCamelCase ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 SCREAMING_SNAKE_CASE : List[Any] = np.array_split(__UpperCamelCase ,__UpperCamelCase ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(__UpperCamelCase ): SCREAMING_SNAKE_CASE : List[str] = chunk_data else: SCREAMING_SNAKE_CASE : str = data def lowercase__( __UpperCamelCase: Dict ,__UpperCamelCase: List[str] ): """simple docstring""" if name in group.attrs: SCREAMING_SNAKE_CASE : Union[str, Any] = [n.decode('utf8' ) if hasattr(__UpperCamelCase ,'decode' ) else n for n in group.attrs[name]] else: SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : str = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('utf8' ) if hasattr(__UpperCamelCase ,'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] ) chunk_id += 1 return data def lowercase__( __UpperCamelCase: Optional[int] ): """simple docstring""" def _expand_single_ad_tensor(__UpperCamelCase: int ): if isinstance(__UpperCamelCase ,tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(__UpperCamelCase ,axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor ,__UpperCamelCase )
28
import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ): warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""", lowerCamelCase__, ) super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
662
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A_ = { """configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ """GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """GraphormerForGraphClassification""", """GraphormerModel""", """GraphormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
29
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__( lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, ) A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths} A : str = Text( cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, ) def _lowerCAmelCase ( self ): # Build iterable dataset if self.streaming: A : int = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A : List[str] = None A : Dict = None A : Tuple = None A : Tuple = None self.builder.download_and_prepare( download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, ) A : List[str] = self.builder.as_dataset( split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory ) return dataset
662
0
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(_lowercase , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors UpperCAmelCase_ : Tuple = load_file(_lowercase ) UpperCAmelCase_ : Union[str, Any] = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: UpperCAmelCase_ : List[Any] = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' ) UpperCAmelCase_ : Optional[int] = pipeline.text_encoder else: UpperCAmelCase_ : int = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' ) UpperCAmelCase_ : str = pipeline.unet # find the target layer UpperCAmelCase_ : str = layer_infos.pop(0 ) while len(_lowercase ) > -1: try: UpperCAmelCase_ : Optional[Any] = curr_layer.__getattr__(_lowercase ) if len(_lowercase ) > 0: UpperCAmelCase_ : List[Any] = layer_infos.pop(0 ) elif len(_lowercase ) == 0: break except Exception: if len(_lowercase ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: UpperCAmelCase_ : Dict = layer_infos.pop(0 ) UpperCAmelCase_ : Optional[int] = [] if "lora_down" in key: pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) ) pair_keys.append(_lowercase ) else: pair_keys.append(_lowercase ) pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: UpperCAmelCase_ : Union[str, Any] = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) UpperCAmelCase_ : List[str] = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_lowercase , _lowercase ).unsqueeze(2 ).unsqueeze(3 ) else: UpperCAmelCase_ : Optional[Any] = state_dict[pair_keys[0]].to(torch.floataa ) UpperCAmelCase_ : List[Any] = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_lowercase , _lowercase ) # update visited list for item in pair_keys: visited.append(_lowercase ) return pipeline if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( '--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.' ) parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument( '--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors' ) parser.add_argument( '--lora_prefix_text_encoder', default='lora_te', type=str, help='The prefix of text encoder weight in safetensors', ) parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW') parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.' ) parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') __a = parser.parse_args() __a = args.base_model_path __a = args.checkpoint_path __a = args.dump_path __a = args.lora_prefix_unet __a = args.lora_prefix_text_encoder __a = args.alpha __a = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) __a = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
30
from typing import TYPE_CHECKING from ....utils import _LazyModule SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]} if TYPE_CHECKING: from .tokenization_tapex import TapexTokenizer else: import sys SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
662
0
import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowerCAmelCase_ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = 3 SCREAMING_SNAKE_CASE_ = (32, 32) SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCAmelCase ) return image @property def lowerCAmelCase_ ( self : Union[str, Any] ): torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) return model @property def lowerCAmelCase_ ( self : Tuple ): torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def lowerCAmelCase_ ( self : Optional[int] ): torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , ) return RobertaSeriesModelWithTransformation(_lowerCAmelCase ) @property def lowerCAmelCase_ ( self : List[Any] ): def extract(*_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ): class lowerCamelCase_ : '''simple docstring''' def __init__( self : str ): SCREAMING_SNAKE_CASE_ = torch.ones([0] ) def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int ): self.pixel_values.to(_lowerCAmelCase ) return self return Out() return extract def lowerCAmelCase_ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.dummy_vae SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' ) SCREAMING_SNAKE_CASE_ = 77 SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline( unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , ) SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase ) alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger' SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 ) SCREAMING_SNAKE_CASE_ = alt_pipe( [prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , ) SCREAMING_SNAKE_CASE_ = output.images SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 ) SCREAMING_SNAKE_CASE_ = alt_pipe( [prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0] SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3 @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def lowerCAmelCase_ ( self : Tuple ): SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.dummy_vae SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' ) SCREAMING_SNAKE_CASE_ = 77 SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase ) # put models in fp16 SCREAMING_SNAKE_CASE_ = unet.half() SCREAMING_SNAKE_CASE_ = vae.half() SCREAMING_SNAKE_CASE_ = bert.half() # make sure here that pndm scheduler skips prk SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline( unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , ) SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase ) alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger' SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = alt_pipe( [prompt] , generator=_lowerCAmelCase , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , ).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def lowerCAmelCase_ ( self : str ): SCREAMING_SNAKE_CASE_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) # resize to resolution that is divisible by 8 but not 16 or 32 SCREAMING_SNAKE_CASE_ = init_image.resize((760, 504) ) SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion' SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained( _lowerCAmelCase , safety_checker=_lowerCAmelCase , ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation' SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = pipe( prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , ) SCREAMING_SNAKE_CASE_ = output.images[0] SCREAMING_SNAKE_CASE_ = image[255:258, 383:386, -1] assert image.shape == (504, 760, 3) SCREAMING_SNAKE_CASE_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : int ): SCREAMING_SNAKE_CASE_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) SCREAMING_SNAKE_CASE_ = init_image.resize((768, 512) ) SCREAMING_SNAKE_CASE_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' ) SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion' SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained( _lowerCAmelCase , safety_checker=_lowerCAmelCase , ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation' SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = pipe( prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , ) SCREAMING_SNAKE_CASE_ = output.images[0] assert image.shape == (512, 768, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1E-2
31
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int: """simple docstring""" A , A : str = 1, 1 A : List[Any] = [] for i in range(1 , n + 1 ): A : Optional[int] = prev_numerator + 2 * prev_denominator A : Any = prev_numerator + prev_denominator if len(str(_lowerCAmelCase ) ) > len(str(_lowerCAmelCase ) ): result.append(_lowerCAmelCase ) A : int = numerator A : int = denominator return len(_lowerCAmelCase ) if __name__ == "__main__": print(F"""{solution() = }""")
662
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ = { "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"], "tokenization_ctrl": ["CTRLTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "CTRLForSequenceClassification", "CTRLLMHeadModel", "CTRLModel", "CTRLPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCTRLForSequenceClassification", "TFCTRLLMHeadModel", "TFCTRLModel", "TFCTRLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
32
import re def __UpperCamelCase ( _lowerCAmelCase ) -> str: """simple docstring""" if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ): raise ValueError("""Invalid Strand""" ) return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) ) if __name__ == "__main__": import doctest doctest.testmod()
662
0
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) lowerCamelCase__ : List[Any] = { """iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""", """iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""", """iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""", """mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""", """mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""", """mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""", """mask_downscaling.0""": """mask_embed.conv1""", """mask_downscaling.1""": """mask_embed.layer_norm1""", """mask_downscaling.3""": """mask_embed.conv2""", """mask_downscaling.4""": """mask_embed.layer_norm2""", """mask_downscaling.6""": """mask_embed.conv3""", """point_embeddings""": """point_embed""", """pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""", """image_encoder""": """vision_encoder""", """neck.0""": """neck.conv1""", """neck.1""": """neck.layer_norm1""", """neck.2""": """neck.conv2""", """neck.3""": """neck.layer_norm2""", """patch_embed.proj""": """patch_embed.projection""", """.norm""": """.layer_norm""", """blocks""": """layers""", } def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]: snake_case__ = {} state_dict.pop('''pixel_mean''' , __lowerCAmelCase ) state_dict.pop('''pixel_std''' , __lowerCAmelCase ) snake_case__ = r'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*''' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: snake_case__ = key.replace(__lowerCAmelCase , __lowerCAmelCase ) if re.match(__lowerCAmelCase , __lowerCAmelCase ): snake_case__ = int(re.match(__lowerCAmelCase , __lowerCAmelCase ).group(2 ) ) if layer_nb == 0: snake_case__ = key.replace('''layers.0''' , '''proj_in''' ) elif layer_nb == 1: snake_case__ = key.replace('''layers.1''' , '''layers.0''' ) elif layer_nb == 2: snake_case__ = key.replace('''layers.2''' , '''proj_out''' ) snake_case__ = value snake_case__ = model_state_dict[ '''prompt_encoder.shared_embedding.positional_embedding''' ] return model_state_dict def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="ybelkada/segment-anything" ) -> Tuple: snake_case__ = hf_hub_download(__lowerCAmelCase , F"""checkpoints/{model_name}.pth""" ) if "sam_vit_b" in model_name: snake_case__ = SamConfig() elif "sam_vit_l" in model_name: snake_case__ = SamVisionConfig( hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) snake_case__ = SamConfig( vision_config=__lowerCAmelCase , ) elif "sam_vit_h" in model_name: snake_case__ = SamVisionConfig( hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) snake_case__ = SamConfig( vision_config=__lowerCAmelCase , ) snake_case__ = torch.load(__lowerCAmelCase , map_location='''cpu''' ) snake_case__ = replace_keys(__lowerCAmelCase ) snake_case__ = SamImageProcessor() snake_case__ = SamProcessor(image_processor=__lowerCAmelCase ) snake_case__ = SamModel(__lowerCAmelCase ) hf_model.load_state_dict(__lowerCAmelCase ) snake_case__ = hf_model.to('''cuda''' ) snake_case__ = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png''' snake_case__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ).convert('''RGB''' ) snake_case__ = [[[400, 650]]] snake_case__ = [[1]] snake_case__ = processor(images=np.array(__lowerCAmelCase ) , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): snake_case__ = hf_model(**__lowerCAmelCase ) snake_case__ = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579_8902_5115_9668 snake_case__ = processor( images=np.array(__lowerCAmelCase ) , input_points=__lowerCAmelCase , input_labels=__lowerCAmelCase , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): snake_case__ = hf_model(**__lowerCAmelCase ) snake_case__ = output.iou_scores.squeeze() assert scores[-1].item() == 0.9712_6030_9219_3604 snake_case__ = ((75, 275, 1725, 850),) snake_case__ = processor(images=np.array(__lowerCAmelCase ) , input_boxes=__lowerCAmelCase , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): snake_case__ = hf_model(**__lowerCAmelCase ) snake_case__ = output.iou_scores.squeeze() assert scores[-1].item() == 0.8686_0156_0592_6514 # Test with 2 points and 1 image. snake_case__ = [[[400, 650], [800, 650]]] snake_case__ = [[1, 1]] snake_case__ = processor( images=np.array(__lowerCAmelCase ) , input_points=__lowerCAmelCase , input_labels=__lowerCAmelCase , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): snake_case__ = hf_model(**__lowerCAmelCase ) snake_case__ = output.iou_scores.squeeze() assert scores[-1].item() == 0.9936_0477_9243_4692 if __name__ == "__main__": lowerCamelCase__ : Tuple = argparse.ArgumentParser() lowerCamelCase__ : int = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""] parser.add_argument( """--model_name""", default="""sam_vit_h_4b8939""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) parser.add_argument( """--model_hub_id""", default="""ybelkada/segment-anything""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) lowerCamelCase__ : Union[str, Any] = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
33
from __future__ import annotations SCREAMING_SNAKE_CASE_:Tuple = """#""" class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self ): A : dict = {} def _lowerCAmelCase ( self, lowerCamelCase__ ): A : List[Any] = self._trie for char in text: if char not in trie: A : str = {} A : str = trie[char] A : Optional[int] = True def _lowerCAmelCase ( self, lowerCamelCase__ ): A : Dict = self._trie for char in prefix: if char in trie: A : Optional[Any] = trie[char] else: return [] return self._elements(lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__ ): A : int = [] for c, v in d.items(): A : List[Any] = [""" """] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )] result.extend(lowerCamelCase__ ) return tuple(lowerCamelCase__ ) SCREAMING_SNAKE_CASE_:Any = Trie() SCREAMING_SNAKE_CASE_:Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""") for word in words: trie.insert_word(word) def __UpperCamelCase ( _lowerCAmelCase ) -> tuple: """simple docstring""" A : List[str] = trie.find_word(_lowerCAmelCase ) return tuple(string + word for word in suffixes ) def __UpperCamelCase ( ) -> None: """simple docstring""" print(autocomplete_using_trie("""de""" ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
662
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class snake_case_ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self) -> Dict: UpperCamelCase = tempfile.mkdtemp() # fmt: off UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens])) UpperCamelCase = { '''do_resize''': True, '''size''': {'''height''': 1_8, '''width''': 1_8}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } UpperCamelCase = os.path.join(self.tmpdirname , lowerCamelCase_) with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp: json.dump(lowerCamelCase_ , lowerCamelCase_) def UpperCAmelCase__ ( self , **lowerCamelCase_) -> int: return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_) def UpperCAmelCase__ ( self , **lowerCamelCase_) -> int: return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_) def UpperCAmelCase__ ( self) -> Dict: shutil.rmtree(self.tmpdirname) def UpperCAmelCase__ ( self) -> Tuple: UpperCamelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta)] UpperCamelCase = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1)) for x in image_inputs] return image_inputs def UpperCAmelCase__ ( self) -> Union[str, Any]: UpperCamelCase = self.get_tokenizer() UpperCamelCase = self.get_image_processor() UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_) processor.save_pretrained(self.tmpdirname) UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast)) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor.image_processor , lowerCamelCase_) def UpperCAmelCase__ ( self) -> str: UpperCamelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''') UpperCamelCase = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0) UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCamelCase_ , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast)) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , lowerCamelCase_) def UpperCAmelCase__ ( self) -> Dict: UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_) UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = image_processor(lowerCamelCase_ , return_tensors='''np''') UpperCamelCase = processor(images=lowerCamelCase_ , return_tensors='''np''') for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2) def UpperCAmelCase__ ( self) -> Union[str, Any]: UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_) UpperCamelCase = '''lower newer''' UpperCamelCase = processor(text=lowerCamelCase_) UpperCamelCase = tokenizer(lowerCamelCase_) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def UpperCAmelCase__ ( self) -> Dict: UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_) UpperCamelCase = '''lower newer''' UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = processor(text=lowerCamelCase_ , images=lowerCamelCase_) self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values''']) # test if it raises when no input is passed with self.assertRaises(lowerCamelCase_): processor() def UpperCAmelCase__ ( self) -> str: UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_) UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase = processor.batch_decode(lowerCamelCase_) UpperCamelCase = tokenizer.batch_decode(lowerCamelCase_) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_) def UpperCAmelCase__ ( self) -> Dict: UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_) UpperCamelCase = '''lower newer''' UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = processor(text=lowerCamelCase_ , images=lowerCamelCase_) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
34
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> Union[str, Any]: """simple docstring""" A : Optional[int] = bnb_quantization_config.load_in_abit A : int = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) A : Any = [] # custom device map if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1: A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: A : int = get_keys_to_not_convert(_lowerCAmelCase ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_lowerCAmelCase ) A : Optional[Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: A : Dict = [] A : Tuple = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_lowerCAmelCase ) # compatibility with peft A : Union[str, Any] = load_in_abit A : Tuple = load_in_abit A : List[str] = get_parameter_device(_lowerCAmelCase ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. """ """The model should be instantiated under the `init_empty_weights` context manager.""" ) A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase ) # convert param to the right dtype A : Tuple = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_lowerCAmelCase ): param.to(_lowerCAmelCase ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): A : str = replace_with_bnb_layers( _lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase ) A : Optional[Any] = get_quantized_model_device_map( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): A : Tuple = True A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase ) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[int]: """simple docstring""" if device_map is None: if torch.cuda.is_available(): A : Optional[int] = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) A : Tuple = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) A : Any = {} A : List[str] = special_dtypes A : Any = no_split_module_classes A : Union[str, Any] = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": A : Tuple = get_balanced_memory( _lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , ) A : int = max_memory A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ): # check if don't have any quantized module on the cpu A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules A : Optional[int] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]: """simple docstring""" if modules_to_not_convert is None: A : Optional[Any] = [] A , A : Dict = _replace_with_bnb_layers( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> int: """simple docstring""" A : Optional[int] = False for name, module in model.named_children(): if current_key_name is None: A : int = [] current_key_name.append(_lowerCAmelCase ) if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` A : Dict = """.""".join(_lowerCAmelCase ) A : Optional[Any] = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: A : Dict = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: A : Optional[Any] = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: A : Dict = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) A : Any = module.weight.data if module.bias is not None: A : Any = module.bias.data bnb_module.requires_grad_(_lowerCAmelCase ) setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) A : Dict = True if len(list(module.children() ) ) > 0: A , A : Dict = _replace_with_bnb_layers( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) A : Union[str, Any] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def __UpperCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: """simple docstring""" with init_empty_weights(): A : Tuple = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` A : Optional[int] = find_tied_parameters(_lowerCAmelCase ) # For compatibility with Accelerate < 0.18 if isinstance(_lowerCAmelCase , _lowerCAmelCase ): A : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A : Optional[int] = sum(_lowerCAmelCase , [] ) A : Tuple = len(_lowerCAmelCase ) > 0 # Check if it is a base model A : List[str] = False if hasattr(_lowerCAmelCase , """base_model_prefix""" ): A : Optional[Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A : str = list(model.named_children() ) A : Tuple = [list_modules[-1][0]] # add last module together with tied weights A : int = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) A : Optional[Any] = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase ) # remove ".weight" from the keys A : Union[str, Any] = [""".weight""", """.bias"""] A : Optional[int] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A : List[str] = name.replace(_lowerCAmelCase , """""" ) filtered_module_names.append(_lowerCAmelCase ) return filtered_module_names def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]: """simple docstring""" for m in model.modules(): if isinstance(_lowerCAmelCase , bnb.nn.Linearabit ): return True return False def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]: """simple docstring""" return next(parameter.parameters() ).device def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase ) A : Tuple = param_name A : Union[str, Any] = model if "." in tensor_name: A : int = tensor_name.split(""".""" ) for split in splits[:-1]: A : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) A : Optional[Any] = new_module A : List[str] = splits[-1] # offload weights A : Optional[int] = False offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase , ) else: offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase ) offload_weight(_lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase ) set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , """meta""" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
662
0
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class lowercase ( _UpperCAmelCase ): lowerCamelCase : List[Any] = (DDIMParallelScheduler,) lowerCamelCase : Union[str, Any] = (('''eta''', 0.0), ('''num_inference_steps''', 50)) def lowercase__ ( self : Optional[int] , **_lowercase : Any ): SCREAMING_SNAKE_CASE__ : int = { '''num_train_timesteps''': 10_00, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''clip_sample''': True, } config.update(**_lowercase ) return config def lowercase__ ( self : Optional[Any] , **_lowercase : Optional[int] ): SCREAMING_SNAKE_CASE__ : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : str = self.get_scheduler_config(**_lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler_class(**_lowercase ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = 10, 0.0 SCREAMING_SNAKE_CASE__ : str = self.dummy_model() SCREAMING_SNAKE_CASE__ : str = self.dummy_sample_deter scheduler.set_timesteps(_lowercase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , _lowercase ) SCREAMING_SNAKE_CASE__ : List[str] = scheduler.step(_lowercase , _lowercase , _lowercase , _lowercase ).prev_sample return sample def lowercase__ ( self : List[str] ): for timesteps in [1_00, 5_00, 10_00]: self.check_over_configs(num_train_timesteps=_lowercase ) def lowercase__ ( self : Optional[Any] ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_lowercase ) SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : Any = self.get_scheduler_config(steps_offset=1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler_class(**_lowercase ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) ) def lowercase__ ( self : Optional[Any] ): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase ) def lowercase__ ( self : Optional[Any] ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_lowercase ) def lowercase__ ( self : List[Any] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowercase ) def lowercase__ ( self : List[str] ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowercase ) def lowercase__ ( self : Optional[int] ): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=_lowercase ) def lowercase__ ( self : Union[str, Any] ): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=_lowercase ) def lowercase__ ( self : str ): self.check_over_configs(thresholding=_lowercase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , ) def lowercase__ ( self : List[Any] ): for t in [1, 10, 49]: self.check_over_forward(time_step=_lowercase ) def lowercase__ ( self : Union[str, Any] ): for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ): self.check_over_forward(time_step=_lowercase , num_inference_steps=_lowercase ) def lowercase__ ( self : Any ): for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=_lowercase , eta=_lowercase ) def lowercase__ ( self : Optional[int] ): SCREAMING_SNAKE_CASE__ : Optional[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ : Tuple = scheduler_class(**_lowercase ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.14771 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.32460 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.00979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1E-5 def lowercase__ ( self : Tuple ): SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : int = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_lowercase ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = 10, 0.0 scheduler.set_timesteps(_lowercase ) SCREAMING_SNAKE_CASE__ : Tuple = self.dummy_model() SCREAMING_SNAKE_CASE__ : str = self.dummy_sample_deter SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_sample_deter + 0.1 SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_sample_deter - 0.1 SCREAMING_SNAKE_CASE__ : List[str] = samplea.shape[0] SCREAMING_SNAKE_CASE__ : int = torch.stack([samplea, samplea, samplea] , dim=0 ) SCREAMING_SNAKE_CASE__ : Tuple = torch.arange(_lowercase )[0:3, None].repeat(1 , _lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler.batch_step_no_noise(_lowercase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _lowercase ) SCREAMING_SNAKE_CASE__ : Dict = torch.sum(torch.abs(_lowercase ) ) SCREAMING_SNAKE_CASE__ : int = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 1147.7904 ) < 1E-2 assert abs(result_mean.item() - 0.4982 ) < 1E-3 def lowercase__ ( self : Dict ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.full_loop() SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.sum(torch.abs(_lowercase ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 172.0067 ) < 1E-2 assert abs(result_mean.item() - 0.223967 ) < 1E-3 def lowercase__ ( self : Dict ): SCREAMING_SNAKE_CASE__ : Any = self.full_loop(prediction_type='''v_prediction''' ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.sum(torch.abs(_lowercase ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 52.5302 ) < 1E-2 assert abs(result_mean.item() - 0.0684 ) < 1E-3 def lowercase__ ( self : Optional[Any] ): # We specify different beta, so that the first alpha is 0.99 SCREAMING_SNAKE_CASE__ : List[str] = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01 ) SCREAMING_SNAKE_CASE__ : Dict = torch.sum(torch.abs(_lowercase ) ) SCREAMING_SNAKE_CASE__ : int = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 149.8295 ) < 1E-2 assert abs(result_mean.item() - 0.1951 ) < 1E-3 def lowercase__ ( self : Tuple ): # We specify different beta, so that the first alpha is 0.99 SCREAMING_SNAKE_CASE__ : Dict = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01 ) SCREAMING_SNAKE_CASE__ : List[str] = torch.sum(torch.abs(_lowercase ) ) SCREAMING_SNAKE_CASE__ : Any = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 149.0784 ) < 1E-2 assert abs(result_mean.item() - 0.1941 ) < 1E-3
35
from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def __UpperCamelCase ( ) -> Dict: """simple docstring""" A : Tuple = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" ) A : Dict = parser.add_subparsers(help="""transformers-cli command helpers""" ) # Register commands ConvertCommand.register_subcommand(_lowerCAmelCase ) DownloadCommand.register_subcommand(_lowerCAmelCase ) EnvironmentCommand.register_subcommand(_lowerCAmelCase ) RunCommand.register_subcommand(_lowerCAmelCase ) ServeCommand.register_subcommand(_lowerCAmelCase ) UserCommands.register_subcommand(_lowerCAmelCase ) AddNewModelCommand.register_subcommand(_lowerCAmelCase ) AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase ) LfsCommands.register_subcommand(_lowerCAmelCase ) PTtoTFCommand.register_subcommand(_lowerCAmelCase ) # Let's go A : Tuple = parser.parse_args() if not hasattr(_lowerCAmelCase , """func""" ): parser.print_help() exit(1 ) # Run A : Any = args.func(_lowerCAmelCase ) service.run() if __name__ == "__main__": main()
662
0
from PIL import Image def lowercase ( __A : Image ) -> Image: '''simple docstring''' snake_case , snake_case : Any = image.size snake_case : Optional[int] = 0 snake_case : Optional[int] = image.load() for i in range(__A ): for j in range(__A ): snake_case : Tuple = pixels[j, i] mean += pixel mean //= width * height for j in range(__A ): for i in range(__A ): snake_case : List[str] = 255 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": __lowercase : List[Any] = mean_threshold(Image.open('''path_to_image''').convert('''L''')) image.save('''output_image_path''')
36
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE_:int = { """configuration_blenderbot""": [ """BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlenderbotConfig""", """BlenderbotOnnxConfig""", ], """tokenization_blenderbot""": ["""BlenderbotTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Optional[int] = [ """BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlenderbotForCausalLM""", """BlenderbotForConditionalGeneration""", """BlenderbotModel""", """BlenderbotPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Union[str, Any] = [ """TFBlenderbotForConditionalGeneration""", """TFBlenderbotModel""", """TFBlenderbotPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Any = [ """FlaxBlenderbotForConditionalGeneration""", """FlaxBlenderbotModel""", """FlaxBlenderbotPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
662
0
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem UpperCamelCase : List[Any] = importlib.util.find_spec("""s3fs""") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 UpperCamelCase : List[compression.BaseCompressedFileFileSystem] = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def UpperCamelCase_ ( __a ) -> str: if "://" in dataset_path: a__ : Optional[int] = dataset_path.split("://" )[1] return dataset_path def UpperCamelCase_ ( __a ) -> bool: if fs is not None and fs.protocol != "file": return True else: return False def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Optional[Any] = not is_remote_filesystem(__a ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(__a ) , fs._strip_protocol(__a ) ) else: fs.mv(__a , __a , recursive=__a ) def UpperCamelCase_ ( ) -> None: if hasattr(fsspec.asyn , "reset_lock" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: a__ : List[str] = None a__ : Any = None a__ : List[Any] = threading.Lock()
37
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list[int]: """simple docstring""" A : Optional[int] = int(_lowerCAmelCase ) # Initialize Result A : int = [] # Traverse through all denomination for denomination in reversed(_lowerCAmelCase ): # Find denominations while int(_lowerCAmelCase ) >= int(_lowerCAmelCase ): total_value -= int(_lowerCAmelCase ) answer.append(_lowerCAmelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": SCREAMING_SNAKE_CASE_:List[Any] = [] SCREAMING_SNAKE_CASE_:Dict = """0""" if ( input("""Do you want to enter your denominations ? (yY/n): """).strip().lower() == "y" ): SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the number of denominations you want to add: """).strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip() else: # All denominations of Indian Currency if user does not enter SCREAMING_SNAKE_CASE_:Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2_000] SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make: """).strip() if int(value) == 0 or int(value) < 0: print("""The total value cannot be zero or negative.""") else: print(F"""Following is minimal change for {value}: """) SCREAMING_SNAKE_CASE_:str = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=""" """)
662
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A_ : int = logging.get_logger(__name__) A_ : Any = { "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json", } class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowerCamelCase__ = '''resnet''' lowerCamelCase__ = ['''basic''', '''bottleneck'''] def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="bottleneck" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ): super().__init__(**__SCREAMING_SNAKE_CASE ) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" ) snake_case__ : List[Any] = num_channels snake_case__ : str = embedding_size snake_case__ : List[Any] = hidden_sizes snake_case__ : Dict = depths snake_case__ : List[Any] = layer_type snake_case__ : int = hidden_act snake_case__ : Union[str, Any] = downsample_in_first_stage snake_case__ : Dict = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )] snake_case__ , snake_case__ : Any = get_aligned_output_features_output_indices( out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names ) class __snake_case ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowerCamelCase__ = version.parse('''1.11''' ) @property def __UpperCamelCase ( self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __UpperCamelCase ( self ): return 1e-3
38
# This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru""" # Build # borrowed from a test SCREAMING_SNAKE_CASE_:Union[str, Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] SCREAMING_SNAKE_CASE_:Any = dict(zip(vocab, range(len(vocab)))) SCREAMING_SNAKE_CASE_:Dict = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE_:List[Any] = Path(tmpdirname) SCREAMING_SNAKE_CASE_:str = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""] SCREAMING_SNAKE_CASE_:Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""] SCREAMING_SNAKE_CASE_:Any = build_dir / VOCAB_FILES_NAMES["""merges_file"""] with open(src_vocab_file, """w""") as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, """w""") as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, """w""") as fp: fp.write("""\n""".join(merges)) SCREAMING_SNAKE_CASE_:Optional[int] = FSMTTokenizer( langs=["""en""", """ru"""], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) SCREAMING_SNAKE_CASE_:Optional[int] = FSMTConfig( langs=["""ru""", """en"""], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) SCREAMING_SNAKE_CASE_:Optional[Any] = FSMTForConditionalGeneration(config) print(F"""num of params {tiny_model.num_parameters()}""") # Test SCREAMING_SNAKE_CASE_:Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""") SCREAMING_SNAKE_CASE_:str = tiny_model(**batch) print("""test output:""", len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F"""Generated {mname_tiny}""") # Upload # transformers-cli upload tiny-wmt19-en-ru
662
0
import re from filelock import FileLock try: import nltk lowerCAmelCase_ = True except (ImportError, ModuleNotFoundError): lowerCAmelCase_ = False if NLTK_AVAILABLE: with FileLock('''.lock''') as lock: nltk.download('''punkt''', quiet=True) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): re.sub('''<n>''' , '''''' , SCREAMING_SNAKE_CASE__ ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE__ ) )
39
import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("""0.12.2"""): raise Exception("""requires fairseq >= 0.12.2""") if version.parse(fairseq.__version__) > version.parse("""2"""): raise Exception("""requires fairseq < v2""") logging.set_verbosity_info() SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_:int = """Hello, World!""" SCREAMING_SNAKE_CASE_:List[Any] = """en_XX""" def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any: """simple docstring""" A : Optional[int] = Path("""data_bin""" ) A : Optional[Any] = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowerCAmelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , ) xmod.eval() # disable dropout print(_lowerCAmelCase ) A : Any = xmod.model.encoder.sentence_encoder A : Optional[int] = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0] print("""Our X-MOD config:""" , _lowerCAmelCase ) A : int = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase ) model.eval() # Now let's copy all the weights. # Embeddings A : Any = xmod_sent_encoder.embed_tokens.weight A : int = xmod_sent_encoder.embed_positions.weight A : str = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. A : Dict = xmod_sent_encoder.layernorm_embedding.weight A : int = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer A : str = model.roberta.encoder.layer[i] A : Tuple = xmod_sent_encoder.layers[i] # self attention A : Optional[int] = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError("""Dimensions of self-attention weights do not match.""" ) A : List[str] = xmod_layer.self_attn.q_proj.weight A : Optional[int] = xmod_layer.self_attn.q_proj.bias A : List[Any] = xmod_layer.self_attn.k_proj.weight A : Union[str, Any] = xmod_layer.self_attn.k_proj.bias A : Optional[int] = xmod_layer.self_attn.v_proj.weight A : Dict = xmod_layer.self_attn.v_proj.bias # self-attention output A : Optional[Any] = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError("""Dimensions of self-attention output weights do not match.""" ) A : Optional[Any] = xmod_layer.self_attn.out_proj.weight A : Dict = xmod_layer.self_attn.out_proj.bias A : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight A : str = xmod_layer.self_attn_layer_norm.bias # intermediate A : str = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("""Dimensions of intermediate weights do not match.""" ) A : Optional[int] = xmod_layer.fca.weight A : Optional[int] = xmod_layer.fca.bias # output A : Dict = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("""Dimensions of feed-forward weights do not match.""" ) A : Union[str, Any] = xmod_layer.fca.weight A : int = xmod_layer.fca.bias A : List[str] = xmod_layer.final_layer_norm.weight A : Optional[Any] = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: A : str = xmod_layer.adapter_layer_norm.weight A : str = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError("""Lists of language adapters do not match.""" ) for lang_code, adapter in xmod_layer.adapter_modules.items(): A : Optional[int] = bert_output.adapter_modules[lang_code] A : int = xmod_layer.adapter_modules[lang_code] A : Optional[Any] = from_adapter.fca.weight A : Optional[Any] = from_adapter.fca.bias A : List[str] = from_adapter.fca.weight A : Any = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: A : Dict = xmod_sent_encoder.layer_norm.weight A : int = xmod_sent_encoder.layer_norm.bias if classification_head: A : int = xmod.model.classification_heads["""mnli"""].dense.weight A : Optional[Any] = xmod.model.classification_heads["""mnli"""].dense.bias A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.bias else: # LM Head A : Any = xmod.model.encoder.lm_head.dense.weight A : Tuple = xmod.model.encoder.lm_head.dense.bias A : Any = xmod.model.encoder.lm_head.layer_norm.weight A : List[str] = xmod.model.encoder.lm_head.layer_norm.bias A : Union[str, Any] = xmod.model.encoder.lm_head.weight A : Tuple = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. A : Optional[int] = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(_lowerCAmelCase ) A : List[str] = model(_lowerCAmelCase )[0] if classification_head: A : Dict = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowerCAmelCase ) ) else: A : Optional[Any] = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) A : str = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 A : Optional[Any] = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" ) if not success: raise Exception("""Something went wRoNg""" ) Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_:Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head.""" ) SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
662
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCAmelCase_ ( a__ , unittest.TestCase ): UpperCAmelCase__ : str = ShapEPipeline UpperCAmelCase__ : Union[str, Any] = ["prompt"] UpperCAmelCase__ : List[str] = ["prompt"] UpperCAmelCase__ : str = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] UpperCAmelCase__ : Optional[Any] = False @property def snake_case_ ( self ) -> List[Any]: return 32 @property def snake_case_ ( self ) -> List[Any]: return 32 @property def snake_case_ ( self ) -> Dict: return self.time_input_dim * 4 @property def snake_case_ ( self ) -> Optional[int]: return 8 @property def snake_case_ ( self ) -> List[Any]: UpperCamelCase : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def snake_case_ ( self ) -> Tuple: torch.manual_seed(0 ) UpperCamelCase : List[str] = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ ) @property def snake_case_ ( self ) -> Tuple: torch.manual_seed(0 ) UpperCamelCase : Optional[Any] = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } UpperCamelCase : str = PriorTransformer(**SCREAMING_SNAKE_CASE_ ) return model @property def snake_case_ ( self ) -> Tuple: torch.manual_seed(0 ) UpperCamelCase : List[Any] = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } UpperCamelCase : Tuple = ShapERenderer(**SCREAMING_SNAKE_CASE_ ) return model def snake_case_ ( self ) -> str: UpperCamelCase : List[Any] = self.dummy_prior UpperCamelCase : Union[str, Any] = self.dummy_text_encoder UpperCamelCase : List[str] = self.dummy_tokenizer UpperCamelCase : Dict = self.dummy_renderer UpperCamelCase : List[Any] = HeunDiscreteScheduler( beta_schedule='exp', num_train_timesteps=1024, prediction_type='sample', use_karras_sigmas=SCREAMING_SNAKE_CASE_, clip_sample=SCREAMING_SNAKE_CASE_, clip_sample_range=1.0, ) UpperCamelCase : Tuple = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Dict: if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ): UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase : int = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def snake_case_ ( self ) -> Any: UpperCamelCase : int = 'cpu' UpperCamelCase : str = self.get_dummy_components() UpperCamelCase : Dict = self.pipeline_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : int = output.images[0] UpperCamelCase : Any = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) UpperCamelCase : Optional[Any] = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case_ ( self ) -> List[Any]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def snake_case_ ( self ) -> List[Any]: UpperCamelCase : Optional[Any] = torch_device == 'cpu' UpperCamelCase : Tuple = True self._test_inference_batch_single_identical( batch_size=2, test_max_difference=SCREAMING_SNAKE_CASE_, relax_max_difference=SCREAMING_SNAKE_CASE_, ) def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Tuple = self.get_dummy_components() UpperCamelCase : List[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = 1 UpperCamelCase : List[Any] = 2 UpperCamelCase : Dict = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) for key in inputs.keys(): if key in self.batch_params: UpperCamelCase : Dict = batch_size * [inputs[key]] UpperCamelCase : List[str] = pipe(**SCREAMING_SNAKE_CASE_, num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self ) -> int: UpperCamelCase : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy' ) UpperCamelCase : Optional[Any] = ShapEPipeline.from_pretrained('openai/shap-e' ) UpperCamelCase : Tuple = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 ) UpperCamelCase : List[Any] = pipe( 'a shark', generator=SCREAMING_SNAKE_CASE_, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type='np', ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
40
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ): A : Any = tempfile.mkdtemp() A : List[str] = BlipImageProcessor() A : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) A : str = BlipProcessor(lowerCamelCase__, lowerCamelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowerCAmelCase ( self, **lowerCamelCase__ ): return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).tokenizer def _lowerCAmelCase ( self, **lowerCamelCase__ ): return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).image_processor def _lowerCAmelCase ( self ): shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self ): A : Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )] A : Any = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs] return image_inputs def _lowerCAmelCase ( self ): A : int = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A : Any = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" ) A : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase__, padding_value=1.0 ) A : Dict = BlipProcessor.from_pretrained( self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer, lowerCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : Optional[int] = self.get_image_processor() A : str = self.get_tokenizer() A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Any = self.prepare_image_inputs() A : int = image_processor(lowerCamelCase__, return_tensors="""np""" ) A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 ) def _lowerCAmelCase ( self ): A : List[str] = self.get_image_processor() A : int = self.get_tokenizer() A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[Any] = """lower newer""" A : List[Any] = processor(text=lowerCamelCase__ ) A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key] ) def _lowerCAmelCase ( self ): A : List[Any] = self.get_image_processor() A : Dict = self.get_tokenizer() A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[int] = """lower newer""" A : Union[str, Any] = self.prepare_image_inputs() A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ ) self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(lowerCamelCase__ ): processor() def _lowerCAmelCase ( self ): A : List[Any] = self.get_image_processor() A : Dict = self.get_tokenizer() A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A : Optional[int] = processor.batch_decode(lowerCamelCase__ ) A : Dict = tokenizer.batch_decode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : Optional[int] = self.get_image_processor() A : int = self.get_tokenizer() A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[int] = """lower newer""" A : List[str] = self.prepare_image_inputs() A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
662
0
'''simple docstring''' from importlib import import_module from .logging import get_logger lowerCAmelCase__ = get_logger(__name__) class lowercase_ : """simple docstring""" def __init__( self : Union[str, Any] ,lowercase__ : Dict ,lowercase__ : Union[str, Any]=None ): __lowercase = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('''__''' ): setattr(self ,lowercase__ ,getattr(lowercase__ ,lowercase__ ) ) __lowercase = module._original_module if isinstance(lowercase__ ,_PatchedModuleObj ) else module class lowercase_ : """simple docstring""" SCREAMING_SNAKE_CASE : str = [] def __init__( self : Tuple ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Any=None ): __lowercase = obj __lowercase = target __lowercase = new __lowercase = target.split('''.''' )[0] __lowercase = {} __lowercase = attrs or [] def __enter__( self : Union[str, Any] ): *__lowercase , __lowercase = self.target.split('''.''' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(lowercase__ ) ): try: __lowercase = import_module('''.'''.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): __lowercase = getattr(self.obj ,lowercase__ ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(lowercase__ ,_PatchedModuleObj ) and obj_attr._original_module is submodule) ): __lowercase = obj_attr # patch at top level setattr(self.obj ,lowercase__ ,_PatchedModuleObj(lowercase__ ,attrs=self.attrs ) ) __lowercase = getattr(self.obj ,lowercase__ ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(lowercase__ ,lowercase__ ,_PatchedModuleObj(getattr(lowercase__ ,lowercase__ ,lowercase__ ) ,attrs=self.attrs ) ) __lowercase = getattr(lowercase__ ,lowercase__ ) # finally set the target attribute setattr(lowercase__ ,lowercase__ ,self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: __lowercase = getattr(import_module('''.'''.join(lowercase__ ) ) ,lowercase__ ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj ,lowercase__ ) is attr_value: __lowercase = getattr(self.obj ,lowercase__ ) setattr(self.obj ,lowercase__ ,self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" __lowercase = globals()['''__builtins__'''][target_attr] setattr(self.obj ,lowercase__ ,self.new ) else: raise RuntimeError(F"Tried to patch attribute {target_attr} instead of a submodule." ) def __exit__( self : Tuple ,*lowercase__ : Any ): for attr in list(self.original ): setattr(self.obj ,lowercase__ ,self.original.pop(lowercase__ ) ) def SCREAMING_SNAKE_CASE ( self : int ): self.__enter__() self._active_patches.append(self ) def SCREAMING_SNAKE_CASE ( self : Dict ): try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
41
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ): return f'''gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase__ ) for s in shape] )}.npy''' def _lowerCAmelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 4, 64, 64), lowerCamelCase__=False ): A : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa A : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ ) return image def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__="CompVis/stable-diffusion-v1-4" ): A : str = jnp.bfloataa if fpaa else jnp.floataa A : Union[str, Any] = """bf16""" if fpaa else None A , A : str = FlaxUNetaDConditionModel.from_pretrained( lowerCamelCase__, subfolder="""unet""", dtype=lowerCamelCase__, revision=lowerCamelCase__ ) return model, params def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 77, 768), lowerCamelCase__=False ): A : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa A : List[str] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ): A , A : List[str] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=lowerCamelCase__ ) A : str = self.get_latents(lowerCamelCase__, fpaa=lowerCamelCase__ ) A : int = self.get_encoder_hidden_states(lowerCamelCase__, fpaa=lowerCamelCase__ ) A : Optional[Any] = model.apply( {"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample assert sample.shape == latents.shape A : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa ) A : Dict = jnp.array(lowerCamelCase__, dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ): A , A : Tuple = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=lowerCamelCase__ ) A : int = self.get_latents(lowerCamelCase__, shape=(4, 4, 96, 96), fpaa=lowerCamelCase__ ) A : Union[str, Any] = self.get_encoder_hidden_states(lowerCamelCase__, shape=(4, 77, 1024), fpaa=lowerCamelCase__ ) A : Dict = model.apply( {"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample assert sample.shape == latents.shape A : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa ) A : List[Any] = jnp.array(lowerCamelCase__, dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
662
0
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
42
from typing import Any import numpy as np def __UpperCamelCase ( _lowerCAmelCase ) -> bool: """simple docstring""" return np.array_equal(_lowerCAmelCase , matrix.conjugate().T ) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: """simple docstring""" A : Any = v.conjugate().T A : List[Any] = v_star.dot(_lowerCAmelCase ) assert isinstance(_lowerCAmelCase , np.ndarray ) return (v_star_dot.dot(_lowerCAmelCase )) / (v_star.dot(_lowerCAmelCase )) def __UpperCamelCase ( ) -> None: """simple docstring""" A : Any = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) A : str = np.array([[1], [2], [3]] ) assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.''' print(rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) ) A : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.''' assert rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
662
0
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase = logging.get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError('''Quantized models are not supported.''' ) lowercase__ = re.match(R'''^mobilenet_v1_([^_]*)_([^_]*)$''' , SCREAMING_SNAKE_CASE ) if matches: lowercase__ = float(matches[1] ) lowercase__ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". lowercase__ = 10_01 lowercase__ = '''imagenet-1k-id2label.json''' lowercase__ = '''huggingface/label-files''' lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) ) lowercase__ = {int(SCREAMING_SNAKE_CASE ) + 1: v for k, v in idalabel.items()} lowercase__ = '''background''' lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} return config def _a ( ): """simple docstring""" lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ): """simple docstring""" lowercase__ = get_mobilenet_va_config(SCREAMING_SNAKE_CASE ) # Load 🤗 model lowercase__ = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor lowercase__ = MobileNetVaImageProcessor( crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , ) lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' ) lowercase__ = model(**SCREAMING_SNAKE_CASE ) lowercase__ = outputs.logits assert logits.shape == (1, 10_01) if model_name == "mobilenet_v1_1.0_224": lowercase__ = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ) elif model_name == "mobilenet_v1_0.75_192": lowercase__ = torch.tensor([-3.9_440, -2.3_141, -0.3_333] ) else: lowercase__ = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(SCREAMING_SNAKE_CASE ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE ) if push_to_hub: print('''Pushing to the hub...''' ) lowercase__ = '''google/''' + model_name image_processor.push_to_hub(SCREAMING_SNAKE_CASE ) model.push_to_hub(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) lowerCAmelCase = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
43
from __future__ import annotations import numpy as np def __UpperCamelCase ( _lowerCAmelCase ) -> tuple[np.ndarray, np.ndarray]: """simple docstring""" A , A : int = np.shape(_lowerCAmelCase ) if rows != columns: A : Union[str, Any] = ( """'table' has to be of square shaped array but got a """ f'''{rows}x{columns} array:\n{table}''' ) raise ValueError(_lowerCAmelCase ) A : Union[str, Any] = np.zeros((rows, columns) ) A : Dict = np.zeros((rows, columns) ) for i in range(_lowerCAmelCase ): for j in range(_lowerCAmelCase ): A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) ) if upper[j][j] == 0: raise ArithmeticError("""No LU decomposition exists""" ) A : Any = (table[i][j] - total) / upper[j][j] A : Union[str, Any] = 1 for j in range(_lowerCAmelCase , _lowerCAmelCase ): A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) ) A : str = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
662
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase_ : List[Any] = { 'tanreinama/GPTSAN-2.8B-spout_is_uniform': ( 'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json' ), } class UpperCAmelCase__ ( A ): lowerCAmelCase_ = 'gptsan-japanese' lowerCAmelCase_ = [ 'past_key_values', ] lowerCAmelCase_ = { 'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : List[str],__A : Union[str, Any]=3_6_0_0_0,__A : Any=1_2_8_0,__A : List[str]=1_0_2_4,__A : List[str]=8_1_9_2,__A : Any=4_0_9_6,__A : int=1_2_8,__A : List[Any]=1_0,__A : Any=0,__A : int=1_6,__A : str=1_6,__A : str=1_2_8,__A : List[str]=0.0,__A : int=1e-5,__A : List[str]=False,__A : List[Any]=0.0,__A : Optional[int]="float32",__A : Any=False,__A : List[Any]=False,__A : Any=False,__A : Dict=0.002,__A : Tuple=False,__A : Optional[Any]=True,__A : Union[str, Any]=3_5_9_9_8,__A : List[Any]=3_5_9_9_5,__A : Tuple=3_5_9_9_9,**__A : List[Any],): _lowerCamelCase : int = vocab_size _lowerCamelCase : List[str] = max_position_embeddings _lowerCamelCase : Dict = d_model _lowerCamelCase : List[str] = d_ff _lowerCamelCase : int = d_ext _lowerCamelCase : Optional[Any] = d_spout _lowerCamelCase : int = num_switch_layers _lowerCamelCase : Dict = num_ext_layers _lowerCamelCase : List[str] = num_switch_layers + num_ext_layers _lowerCamelCase : List[str] = num_heads _lowerCamelCase : Tuple = num_experts _lowerCamelCase : List[str] = expert_capacity _lowerCamelCase : str = dropout_rate _lowerCamelCase : List[Any] = layer_norm_epsilon _lowerCamelCase : Optional[int] = router_bias _lowerCamelCase : List[str] = router_jitter_noise _lowerCamelCase : int = router_dtype _lowerCamelCase : Optional[int] = router_ignore_padding_tokens _lowerCamelCase : Optional[Any] = output_hidden_states _lowerCamelCase : Optional[int] = output_attentions _lowerCamelCase : List[Any] = initializer_factor _lowerCamelCase : Union[str, Any] = output_router_logits _lowerCamelCase : Optional[Any] = use_cache super().__init__( separator_token_id=__A,pad_token_id=__A,eos_token_id=__A,**__A,)
44
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple[int, int]: """simple docstring""" def constraint_to_multiple_of(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0 , _lowerCAmelCase=None ): A : Optional[int] = round(val / multiple ) * multiple if max_val is not None and x > max_val: A : Optional[Any] = math.floor(val / multiple ) * multiple if x < min_val: A : Any = math.ceil(val / multiple ) * multiple return x A : Optional[Any] = (output_size, output_size) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else output_size A , A : List[Any] = get_image_size(_lowerCAmelCase ) A , A : List[Any] = output_size # determine new height and width A : Optional[int] = output_height / input_height A : Optional[Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width A : Any = scale_width else: # fit height A : int = scale_height A : Any = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCAmelCase ) A : int = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCAmelCase ) return (new_height, new_width) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : Optional[int] = ["pixel_values"] def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) A : int = size if size is not None else {"""height""": 384, """width""": 384} A : str = get_size_dict(lowerCamelCase__ ) A : Optional[Any] = do_resize A : Optional[int] = size A : Union[str, Any] = keep_aspect_ratio A : int = ensure_multiple_of A : Dict = resample A : Optional[Any] = do_rescale A : Any = rescale_factor A : str = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ): A : Dict = get_size_dict(lowerCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) A : Optional[Any] = get_resize_output_image_size( lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, ) return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ): A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize A : str = size if size is not None else self.size A : str = get_size_dict(lowerCamelCase__ ) A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of A : Tuple = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : int = rescale_factor if rescale_factor is not None else self.rescale_factor A : int = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : Optional[int] = image_std if image_std is not None else self.image_std A : Any = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. A : str = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images] if do_rescale: A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images] if do_normalize: A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images] A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images] A : Optional[int] = {"""pixel_values""": images} return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ): A : Any = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCamelCase__ ) != len(lowerCamelCase__ ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(lowerCamelCase__ ): A : int = target_sizes.numpy() A : Union[str, Any] = [] for idx in range(len(lowerCamelCase__ ) ): A : int = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ ) A : Tuple = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCamelCase__ ) else: A : List[str] = logits.argmax(dim=1 ) A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
662
0
import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase = "▁" UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( lowercase , unittest.TestCase ): """simple docstring""" _snake_case : Dict = BigBirdTokenizer _snake_case : List[Any] = BigBirdTokenizerFast _snake_case : Any = True _snake_case : Optional[int] = True def __a ( self :Union[str, Any] ): super().setUp() UpperCamelCase__ :List[Any] = self.tokenizer_class(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self :str ): UpperCamelCase__ :List[str] = """<s>""" UpperCamelCase__ :str = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def __a ( self :Any ): UpperCamelCase__ :Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """[MASK]""" ) self.assertEqual(len(lowerCamelCase__ ) , 10_04 ) def __a ( self :Optional[int] ): self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def __a ( self :Optional[Any] ): if not self.test_rust_tokenizer: return UpperCamelCase__ :Any = self.get_tokenizer() UpperCamelCase__ :str = self.get_rust_tokenizer() UpperCamelCase__ :List[Any] = """I was born in 92000, and this is falsé.""" UpperCamelCase__ :List[str] = tokenizer.tokenize(lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = rust_tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) UpperCamelCase__ :str = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) UpperCamelCase__ :Tuple = self.get_rust_tokenizer() UpperCamelCase__ :Any = tokenizer.encode(lowerCamelCase__ ) UpperCamelCase__ :Tuple = rust_tokenizer.encode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def __a ( self :Optional[Any] ): UpperCamelCase__ :Dict = BigBirdTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) UpperCamelCase__ :str = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [2_85, 46, 10, 1_70, 3_82] , ) UpperCamelCase__ :List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCamelCase__ :Tuple = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) UpperCamelCase__ :Optional[int] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def __a ( self :Dict ): return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" ) @slow def __a ( self :List[str] ): UpperCamelCase__ :Dict = """Hello World!""" UpperCamelCase__ :Any = [65, 1_85_36, 22_60, 1_01, 66] self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def __a ( self :str ): UpperCamelCase__ :Optional[Any] = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) # fmt: off UpperCamelCase__ :Any = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231 # fmt: on self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @require_torch @slow def __a ( self :str ): import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence UpperCamelCase__ :Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] UpperCamelCase__ :Optional[Any] = """ """.join(lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = self.big_tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" , return_token_type_ids=lowerCamelCase__ ) UpperCamelCase__ :List[Any] = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = BigBirdConfig(attention_type="""original_full""" ) UpperCamelCase__ :List[str] = BigBirdModel(lowerCamelCase__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowerCamelCase__ ) model(**lowerCamelCase__ ) @slow def __a ( self :List[str] ): UpperCamelCase__ :Any = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" ) UpperCamelCase__ :Any = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids ) self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" ) @slow def __a ( self :Union[str, Any] ): # fmt: off UpperCamelCase__ :int = {"""input_ids""": [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
45
class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self, lowerCamelCase__ ): # we need a list not a string, so do something to change the type A : List[Any] = arr.split(""",""" ) def _lowerCAmelCase ( self ): A : int = [int(self.array[0] )] * len(self.array ) A : Optional[Any] = [int(self.array[0] )] * len(self.array ) for i in range(1, len(self.array ) ): A : Union[str, Any] = max( int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) ) A : Dict = max(sum_value[i], rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": SCREAMING_SNAKE_CASE_:int = input("""please input some numbers:""") SCREAMING_SNAKE_CASE_:Dict = SubArray(whole_array) SCREAMING_SNAKE_CASE_:Optional[int] = array.solve_sub_array() print(("""the results is:""", re))
662
0
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : List[Any] = { '''configuration_xmod''': [ '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XmodConfig''', '''XmodOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Tuple = [ '''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XmodForCausalLM''', '''XmodForMaskedLM''', '''XmodForMultipleChoice''', '''XmodForQuestionAnswering''', '''XmodForSequenceClassification''', '''XmodForTokenClassification''', '''XmodModel''', '''XmodPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys _lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
46
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_:List[Any] = { """google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : Optional[Any] = "bit" __lowerCamelCase : Union[str, Any] = ["preactivation", "bottleneck"] __lowerCamelCase : Union[str, Any] = ["SAME", "VALID"] def __init__( self, lowerCamelCase__=3, lowerCamelCase__=64, lowerCamelCase__=[256, 512, 1024, 2048], lowerCamelCase__=[3, 4, 6, 3], lowerCamelCase__="preactivation", lowerCamelCase__="relu", lowerCamelCase__=None, lowerCamelCase__=32, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=1, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) if layer_type not in self.layer_types: raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' ) if global_padding is not None: if global_padding.upper() in self.supported_padding: A : List[Any] = global_padding.upper() else: raise ValueError(f'''Padding strategy {global_padding} not supported''' ) A : Dict = num_channels A : List[Any] = embedding_size A : Optional[Any] = hidden_sizes A : str = depths A : str = layer_type A : Union[str, Any] = hidden_act A : Any = global_padding A : Optional[int] = num_groups A : Dict = drop_path_rate A : List[Any] = embedding_dynamic_padding A : List[Any] = output_stride A : Union[str, Any] = width_factor A : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )] A , A : Any = get_aligned_output_features_output_indices( out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
662
0
import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False) parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''') parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''') SCREAMING_SNAKE_CASE__ = parser.parse_args() SCREAMING_SNAKE_CASE__ = '''cpu''' SCREAMING_SNAKE_CASE__ = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings''' SCREAMING_SNAKE_CASE__ = '''path-to-your-trained-model''' SCREAMING_SNAKE_CASE__ = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: SCREAMING_SNAKE_CASE__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) SCREAMING_SNAKE_CASE__ = pipe.to(device) # to channels last SCREAMING_SNAKE_CASE__ = pipe.unet.to(memory_format=torch.channels_last) SCREAMING_SNAKE_CASE__ = pipe.vae.to(memory_format=torch.channels_last) SCREAMING_SNAKE_CASE__ = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: SCREAMING_SNAKE_CASE__ = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex SCREAMING_SNAKE_CASE__ = torch.randn(2, 4, 64, 64) SCREAMING_SNAKE_CASE__ = torch.rand(1) * 999 SCREAMING_SNAKE_CASE__ = torch.randn(2, 77, 768) SCREAMING_SNAKE_CASE__ = (sample, timestep, encoder_hidden_status) try: SCREAMING_SNAKE_CASE__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: SCREAMING_SNAKE_CASE__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) SCREAMING_SNAKE_CASE__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) SCREAMING_SNAKE_CASE__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: SCREAMING_SNAKE_CASE__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute SCREAMING_SNAKE_CASE__ = 666 SCREAMING_SNAKE_CASE__ = torch.Generator(device).manual_seed(seed) SCREAMING_SNAKE_CASE__ = {'''generator''': generator} if args.steps is not None: SCREAMING_SNAKE_CASE__ = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): SCREAMING_SNAKE_CASE__ = pipe(prompt, **generate_kwargs).images[0] # save image image.save('''generated.png''')
47
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=50, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=None, ): A : List[str] = parent A : List[str] = batch_size A : Optional[int] = seq_length A : Optional[int] = is_training A : Tuple = use_input_mask A : Optional[Any] = vocab_size A : str = hidden_size A : Any = num_hidden_layers A : List[Any] = num_attention_heads A : Optional[int] = intermediate_size A : int = hidden_act A : Dict = hidden_dropout_prob A : Optional[Any] = attention_probs_dropout_prob A : List[Any] = max_position_embeddings A : int = initializer_range A : Tuple = use_labels A : List[str] = scope def _lowerCAmelCase ( self ): A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) A : int = None if self.use_input_mask: A : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) A : List[Any] = self.get_config() return config, input_ids, input_mask, token_labels def _lowerCAmelCase ( self ): return BertGenerationConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, ) def _lowerCAmelCase ( self ): ( ( A ) , ( A ) , ( A ) , ( A ) , ) : List[Any] = self.prepare_config_and_inputs() A : Any = True A : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ): A : str = BertGenerationEncoder(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ ) A : List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ): A : List[str] = True A : Union[str, Any] = BertGenerationEncoder(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A : Any = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, ) A : Optional[Any] = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ): A : Union[str, Any] = True A : Optional[int] = True A : Optional[int] = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval() # first forward pass A : int = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__, ) A : List[str] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size ) A : int = ids_tensor((self.batch_size, 3), vocab_size=2 ) # append to next input_ids and A : List[str] = torch.cat([input_ids, next_tokens], dim=-1 ) A : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1 ) A : List[str] = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0] A : Any = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0] # select random slice A : Any = ids_tensor((1,), output_from_past.shape[-1] ).item() A : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() A : Dict = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, ): A : Optional[int] = BertGenerationDecoder(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A : List[str] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCAmelCase ( self ): A , A , A , A : str = self.prepare_config_and_inputs() A : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () __lowerCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else () __lowerCamelCase : List[Any] = ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() else {} ) def _lowerCAmelCase ( self ): A : Any = BertGenerationEncoderTester(self ) A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 ) def _lowerCAmelCase ( self ): self.config_tester.run_common_tests() def _lowerCAmelCase ( self ): A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _lowerCAmelCase ( self ): A , A , A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs() A : Any = """bert""" self.model_tester.create_and_check_model(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ ) def _lowerCAmelCase ( self ): # This regression test was failing with PyTorch < 1.3 ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() A : int = None self.model_tester.create_and_check_model_as_decoder( lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ) def _lowerCAmelCase ( self ): A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ ) @slow def _lowerCAmelCase ( self ): A : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) self.assertIsNotNone(lowerCamelCase__ ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): A : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) A : Optional[int] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): A : Union[str, Any] = model(lowerCamelCase__ )[0] A : List[Any] = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape, lowerCamelCase__ ) A : Tuple = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): A : Optional[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) A : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): A : Dict = model(lowerCamelCase__ )[0] A : List[str] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape, lowerCamelCase__ ) A : Optional[Any] = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
662
0
'''simple docstring''' import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device UpperCAmelCase__ : str = False class A ( unittest.TestCase ): pass @slow @require_torch_gpu class A ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) lowerCAmelCase__ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe( image=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images lowerCAmelCase__ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase__ = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
48
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : str = ["pixel_values"] def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384} A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ ) A : Optional[Any] = do_resize A : Dict = size # Default value set here for backwards compatibility where the value in config is None A : Dict = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : List[str] = do_rescale A : Tuple = rescale_factor A : Optional[int] = do_normalize A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ): A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ ) if "shortest_edge" not in size: raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' ) A : List[str] = size["""shortest_edge"""] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : int = int(shortest_edge / crop_pct ) A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ ) A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ ) else: # warping (no cropping) when evaluated at 384 or larger return resize( lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ): A : Dict = do_resize if do_resize is not None else self.do_resize A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct A : str = resample if resample is not None else self.resample A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor A : Dict = do_normalize if do_normalize is not None else self.do_normalize A : List[str] = image_mean if image_mean is not None else self.image_mean A : Optional[Any] = image_std if image_std is not None else self.image_std A : Optional[Any] = size if size is not None else self.size A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ ) A : Any = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("""crop_pct must be specified if size < 384.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images] if do_rescale: A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images] if do_normalize: A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images] A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images] A : Dict = {"""pixel_values""": images} return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
662
0
"""simple docstring""" import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Tuple ): __UpperCAmelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() __UpperCAmelCase = dict(zip(_lowercase , range(len(_lowercase ) ) ) ) __UpperCAmelCase = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } __UpperCAmelCase = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 1_60_00, '''return_attention_mask''': False, '''do_normalize''': True, } __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __UpperCAmelCase = os.path.join(self.tmpdirname , _lowercase ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowercase ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowercase ) + '''\n''' ) # load decoder from hub __UpperCAmelCase = '''hf-internal-testing/ngram-beam-search-decoder''' def a ( self : Any , **_lowercase : Optional[int] ): __UpperCAmelCase = self.add_kwargs_tokens_map.copy() kwargs.update(_lowercase ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_lowercase ) def a ( self : Optional[Any] , **_lowercase : Any ): return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_lowercase ) def a ( self : Optional[Any] , **_lowercase : Optional[Any] ): return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_lowercase ) def a ( self : Optional[Any] ): shutil.rmtree(self.tmpdirname ) def a ( self : Tuple ): __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = self.get_feature_extractor() __UpperCAmelCase = self.get_decoder() __UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowercase ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , _lowercase ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , _lowercase ) def a ( self : Dict ): __UpperCAmelCase = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match __UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def a ( self : str ): __UpperCAmelCase = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(_lowercase , '''include''' ): WavaVecaProcessorWithLM( tokenizer=_lowercase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def a ( self : Dict ): __UpperCAmelCase = self.get_feature_extractor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = self.get_decoder() __UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) __UpperCAmelCase = floats_list((3, 10_00) ) __UpperCAmelCase = feature_extractor(_lowercase , return_tensors='''np''' ) __UpperCAmelCase = processor(_lowercase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def a ( self : str ): __UpperCAmelCase = self.get_feature_extractor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = self.get_decoder() __UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) __UpperCAmelCase = '''This is a test string''' __UpperCAmelCase = processor(text=_lowercase ) __UpperCAmelCase = tokenizer(_lowercase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self : Optional[int] , _lowercase : Any=(2, 10, 16) , _lowercase : str=77 ): np.random.seed(_lowercase ) return np.random.rand(*_lowercase ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.get_feature_extractor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = self.get_decoder() __UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) __UpperCAmelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 ) __UpperCAmelCase = processor.decode(_lowercase ) __UpperCAmelCase = decoder.decode_beams(_lowercase )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def a ( self : int , _lowercase : Dict ): __UpperCAmelCase = self.get_feature_extractor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = self.get_decoder() __UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) __UpperCAmelCase = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: __UpperCAmelCase = processor.batch_decode(_lowercase ) else: with get_context(_lowercase ).Pool() as pool: __UpperCAmelCase = processor.batch_decode(_lowercase , _lowercase ) __UpperCAmelCase = list(_lowercase ) with get_context('''fork''' ).Pool() as p: __UpperCAmelCase = decoder.decode_beams_batch(_lowercase , _lowercase ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(_lowercase , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(_lowercase , decoded_processor.logit_score ) self.assertListEqual(_lowercase , decoded_processor.lm_score ) def a ( self : Dict ): __UpperCAmelCase = self.get_feature_extractor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = self.get_decoder() __UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) __UpperCAmelCase = self._get_dummy_logits() __UpperCAmelCase = 15 __UpperCAmelCase = -20.0 __UpperCAmelCase = -4.0 __UpperCAmelCase = processor.batch_decode( _lowercase , beam_width=_lowercase , beam_prune_logp=_lowercase , token_min_logp=_lowercase , ) __UpperCAmelCase = decoded_processor_out.text __UpperCAmelCase = list(_lowercase ) with get_context('''fork''' ).Pool() as pool: __UpperCAmelCase = decoder.decode_beams_batch( _lowercase , _lowercase , beam_width=_lowercase , beam_prune_logp=_lowercase , token_min_logp=_lowercase , ) __UpperCAmelCase = [d[0][0] for d in decoded_decoder_out] __UpperCAmelCase = [d[0][2] for d in decoded_decoder_out] __UpperCAmelCase = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(_lowercase , _lowercase ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _lowercase ) self.assertTrue(np.array_equal(_lowercase , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , _lowercase , atol=1E-3 ) ) self.assertTrue(np.array_equal(_lowercase , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9_474] , _lowercase , atol=1E-3 ) ) def a ( self : int ): __UpperCAmelCase = self.get_feature_extractor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = self.get_decoder() __UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) __UpperCAmelCase = self._get_dummy_logits() __UpperCAmelCase = 2.0 __UpperCAmelCase = 5.0 __UpperCAmelCase = -20.0 __UpperCAmelCase = True __UpperCAmelCase = processor.batch_decode( _lowercase , alpha=_lowercase , beta=_lowercase , unk_score_offset=_lowercase , lm_score_boundary=_lowercase , ) __UpperCAmelCase = decoded_processor_out.text __UpperCAmelCase = list(_lowercase ) decoder.reset_params( alpha=_lowercase , beta=_lowercase , unk_score_offset=_lowercase , lm_score_boundary=_lowercase , ) with get_context('''fork''' ).Pool() as pool: __UpperCAmelCase = decoder.decode_beams_batch( _lowercase , _lowercase , ) __UpperCAmelCase = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(_lowercase , _lowercase ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _lowercase ) __UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , _lowercase ) def a ( self : Dict ): __UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key] __UpperCAmelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __UpperCAmelCase = os.listdir(_lowercase ) __UpperCAmelCase = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(_lowercase , _lowercase ) def a ( self : List[Any] ): __UpperCAmelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' ) __UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(_lowercase ) __UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key] __UpperCAmelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __UpperCAmelCase = os.listdir(_lowercase ) __UpperCAmelCase = os.listdir(_lowercase ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(_lowercase , _lowercase ) def a ( self : int ): __UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __UpperCAmelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __UpperCAmelCase = floats_list((3, 10_00) ) __UpperCAmelCase = processor_wavaveca(_lowercase , return_tensors='''np''' ) __UpperCAmelCase = processor_auto(_lowercase , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 ) __UpperCAmelCase = self._get_dummy_logits() __UpperCAmelCase = processor_wavaveca.batch_decode(_lowercase ) __UpperCAmelCase = processor_auto.batch_decode(_lowercase ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def a ( self : Optional[int] ): __UpperCAmelCase = self.get_feature_extractor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = self.get_decoder() __UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def a ( _lowercase : List[Any] , _lowercase : Optional[Any] ): __UpperCAmelCase = [d[key] for d in offsets] return retrieved_list def a ( self : Tuple ): __UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __UpperCAmelCase = self._get_dummy_logits()[0] __UpperCAmelCase = processor.decode(_lowercase , output_word_offsets=_lowercase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_lowercase , _lowercase ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def a ( self : str ): __UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __UpperCAmelCase = self._get_dummy_logits() __UpperCAmelCase = processor.batch_decode(_lowercase , output_word_offsets=_lowercase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_lowercase , _lowercase ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(_lowercase , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def a ( self : Union[str, Any] ): import torch __UpperCAmelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_lowercase ) __UpperCAmelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) ) __UpperCAmelCase = iter(_lowercase ) __UpperCAmelCase = next(_lowercase ) __UpperCAmelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) __UpperCAmelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train __UpperCAmelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): __UpperCAmelCase = model(_lowercase ).logits.cpu().numpy() __UpperCAmelCase = processor.decode(logits[0] , output_word_offsets=_lowercase ) __UpperCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate __UpperCAmelCase = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] __UpperCAmelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(_lowercase , '''word''' ) ) , _lowercase ) self.assertEqual(''' '''.join(self.get_from_offsets(_lowercase , '''word''' ) ) , output.text ) # output times __UpperCAmelCase = torch.tensor(self.get_from_offsets(_lowercase , '''start_time''' ) ) __UpperCAmelCase = torch.tensor(self.get_from_offsets(_lowercase , '''end_time''' ) ) # fmt: off __UpperCAmelCase = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] ) __UpperCAmelCase = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=0.01 ) ) self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=0.01 ) )
49
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any: """simple docstring""" A : Dict = """backbone.""" if is_semantic else """""" A : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (f'''{prefix}cls_token''', """beit.embeddings.cls_token"""), (f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""), (f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""), (f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ("""mask_token""", """beit.embeddings.mask_token"""), ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) else: # layernorm + classification head rename_keys.extend( [ ("""fc_norm.weight""", """beit.pooler.layernorm.weight"""), ("""fc_norm.bias""", """beit.pooler.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any: """simple docstring""" for i in range(config.num_hidden_layers ): A : Dict = """backbone.""" if is_semantic else """""" # queries, keys and values A : Union[str, Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' ) A : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' ) A : Optional[int] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' ) A : int = in_proj_weight[ : config.hidden_size, : ] A : Any = q_bias A : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A : Tuple = in_proj_weight[ -config.hidden_size :, : ] A : Union[str, Any] = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained A : str = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' ) A : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' ) A : Dict = gamma_a A : Dict = gamma_a def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: """simple docstring""" A : List[str] = dct.pop(_lowerCAmelCase ) A : Optional[Any] = val def __UpperCamelCase ( ) -> List[str]: """simple docstring""" A : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" A : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> str: """simple docstring""" A : Dict = False if """rvlcdip""" in checkpoint_url else True A : Union[str, Any] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: A : Dict = 1024 A : List[Any] = 4096 A : int = 24 A : int = 16 # labels if "rvlcdip" in checkpoint_url: A : List[Any] = 16 A : List[Any] = """huggingface/label-files""" A : int = """rvlcdip-id2label.json""" A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) A : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} A : int = idalabel A : Union[str, Any] = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys A : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""] A : str = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase ) # load HuggingFace model A : Any = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase ) model.eval() model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image A : Any = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase ) A : int = prepare_img() A : Tuple = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ) A : str = encoding["""pixel_values"""] A : Tuple = model(_lowerCAmelCase ) A : Optional[int] = outputs.logits # verify logits A : Tuple = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected" Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowerCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_lowerCAmelCase ) if push_to_hub: if has_lm_head: A : Any = """dit-base""" if """base""" in checkpoint_url else """dit-large""" else: A : List[Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip""" image_processor.push_to_hub( repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , ) model.push_to_hub( repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_:Optional[int] = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""", type=str, help="""URL to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", ) SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
662
0
'''simple docstring''' import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @property def UpperCamelCase_ ( self ): torch.manual_seed(0 ) lowerCamelCase__ = UNetaDModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,) return model def UpperCamelCase_ ( self ): lowerCamelCase__ = self.dummy_uncond_unet lowerCamelCase__ = ScoreSdeVeScheduler() lowerCamelCase__ = ScoreSdeVePipeline(unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ) sde_ve.to(_lowerCAmelCase ) sde_ve.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = sde_ve(num_inference_steps=2 ,output_type="""numpy""" ,generator=_lowerCAmelCase ).images lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = sde_ve(num_inference_steps=2 ,output_type="""numpy""" ,generator=_lowerCAmelCase ,return_dict=_lowerCAmelCase )[ 0 ] lowerCamelCase__ = image[0, -3:, -3:, -1] lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase__ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = """google/ncsnpp-church-256""" lowerCamelCase__ = UNetaDModel.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = ScoreSdeVeScheduler.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = ScoreSdeVePipeline(unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ) sde_ve.to(_lowerCAmelCase ) sde_ve.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = sde_ve(num_inference_steps=10 ,output_type="""numpy""" ,generator=_lowerCAmelCase ).images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) lowerCamelCase__ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
50
import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ): warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""", lowerCamelCase__, ) super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
662
0
'''simple docstring''' from __future__ import annotations import csv import requests from bsa import BeautifulSoup def __snake_case ( SCREAMING_SNAKE_CASE_ : str = "" ) -> dict[str, float]: """simple docstring""" UpperCAmelCase = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250''' UpperCAmelCase = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE_ ).text , '''html.parser''' ) UpperCAmelCase = soup.find_all('''td''' , attrs='''titleColumn''' ) UpperCAmelCase = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) } def __snake_case ( SCREAMING_SNAKE_CASE_ : str = "IMDb_Top_250_Movies.csv" ) -> None: """simple docstring""" UpperCAmelCase = get_imdb_top_aaa_movies() with open(SCREAMING_SNAKE_CASE_ , '''w''' , newline='''''' ) as out_file: UpperCAmelCase = csv.writer(SCREAMING_SNAKE_CASE_ ) writer.writerow(['''Movie title''', '''IMDb rating'''] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
51
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__( lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, ) A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths} A : str = Text( cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, ) def _lowerCAmelCase ( self ): # Build iterable dataset if self.streaming: A : int = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A : List[str] = None A : Dict = None A : Tuple = None A : Tuple = None self.builder.download_and_prepare( download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, ) A : List[str] = self.builder.as_dataset( split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory ) return dataset
662
0
"""simple docstring""" from datetime import datetime import requests def __A ( a_ :str) -> bytes: __a : int = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url=''' __a : Optional[Any] = requests.get(base_url + url).json()[0]['''urls'''][0]['''src'''] return requests.get(a_).content if __name__ == "__main__": A = input('''Enter Video/IGTV url: ''').strip() A = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4' with open(file_name, '''wb''') as fp: fp.write(download_video(url)) print(F'Done. Video saved to disk as {file_name}.')
52
from typing import TYPE_CHECKING from ....utils import _LazyModule SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]} if TYPE_CHECKING: from .tokenization_tapex import TapexTokenizer else: import sys SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
662
0
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Dict=3_2 , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Union[str, Any]=1_0 , lowerCAmelCase_ : List[str]=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase_ : Optional[int]=[1, 1, 2, 1] , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Tuple="relu" , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Optional[int]=None , ) -> int: __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = image_size __lowerCAmelCase = num_channels __lowerCAmelCase = embeddings_size __lowerCAmelCase = hidden_sizes __lowerCAmelCase = depths __lowerCAmelCase = is_training __lowerCAmelCase = use_labels __lowerCAmelCase = hidden_act __lowerCAmelCase = num_labels __lowerCAmelCase = scope __lowerCAmelCase = len(lowerCAmelCase_ ) def lowercase ( self : Optional[int] ) -> List[Any]: __lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase = self.get_config() return config, pixel_values def lowercase ( self : Tuple ) -> List[Any]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowercase ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] ) -> str: __lowerCAmelCase = FlaxRegNetModel(config=lowerCAmelCase_ ) __lowerCAmelCase = model(lowerCAmelCase_ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def lowercase ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int ) -> Tuple: __lowerCAmelCase = self.num_labels __lowerCAmelCase = FlaxRegNetForImageClassification(config=lowerCAmelCase_ ) __lowerCAmelCase = model(lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase ( self : List[Any] ) -> Optional[Any]: __lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase = config_and_inputs __lowerCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ): """simple docstring""" a_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () a_ = False a_ = False a_ = False def lowercase ( self : Dict ) -> None: __lowerCAmelCase = FlaxRegNetModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ ) def lowercase ( self : int ) -> Optional[int]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase ( self : str ) -> Union[str, Any]: return def lowercase ( self : Dict ) -> str: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def lowercase ( self : Union[str, Any] ) -> Tuple: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @unittest.skip(reason='RegNet does not use inputs_embeds' ) def lowercase ( self : Union[str, Any] ) -> Any: pass @unittest.skip(reason='RegNet does not support input and output embeddings' ) def lowercase ( self : Tuple ) -> Tuple: pass def lowercase ( self : Optional[Any] ) -> str: __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(lowerCAmelCase_ ) __lowerCAmelCase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase = [*signature.parameters.keys()] __lowerCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def lowercase ( self : List[Any] ) -> Union[str, Any]: def check_hidden_states_output(lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ): __lowerCAmelCase = model_class(lowerCAmelCase_ ) __lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) __lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowerCAmelCase = self.model_tester.num_stages self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 ) __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCAmelCase = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def lowercase ( self : str ) -> str: __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCAmelCase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) __lowerCAmelCase = model_class(lowerCAmelCase_ ) @jax.jit def model_jitted(lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Dict ): return model(pixel_values=lowerCAmelCase_ , **lowerCAmelCase_ ) with self.subTest('JIT Enabled' ): __lowerCAmelCase = model_jitted(**lowerCAmelCase_ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): __lowerCAmelCase = model_jitted(**lowerCAmelCase_ ).to_tuple() self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) ) for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) def a_ ( ): __lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_flax class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase ( self : Union[str, Any] ) -> Optional[Any]: return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None @slow def lowercase ( self : Optional[Any] ) -> Union[str, Any]: __lowerCAmelCase = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=lowerCAmelCase_ , return_tensors='np' ) __lowerCAmelCase = model(**lowerCAmelCase_ ) # verify the logits __lowerCAmelCase = (1, 1_0_0_0) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) __lowerCAmelCase = jnp.array([-0.41_80, -1.50_51, -3.48_36] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
53
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int: """simple docstring""" A , A : str = 1, 1 A : List[Any] = [] for i in range(1 , n + 1 ): A : Optional[int] = prev_numerator + 2 * prev_denominator A : Any = prev_numerator + prev_denominator if len(str(_lowerCAmelCase ) ) > len(str(_lowerCAmelCase ) ): result.append(_lowerCAmelCase ) A : int = numerator A : int = denominator return len(_lowerCAmelCase ) if __name__ == "__main__": print(F"""{solution() = }""")
662
0
def a__ ( lowercase__ , lowercase__ ): '''simple docstring''' if density <= 0: raise ValueError("Impossible fluid density" ) if bulk_modulus <= 0: raise ValueError("Impossible bulk modulus" ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
54
import re def __UpperCamelCase ( _lowerCAmelCase ) -> str: """simple docstring""" if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ): raise ValueError("""Invalid Strand""" ) return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) ) if __name__ == "__main__": import doctest doctest.testmod()
662
0
from sklearn.metrics import fa_score import datasets SCREAMING_SNAKE_CASE :Optional[int] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n' SCREAMING_SNAKE_CASE :int = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n' SCREAMING_SNAKE_CASE :List[str] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self : str ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) ,reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] ,) def UpperCamelCase_ ( self : Dict ,A : Dict ,A : str ,A : str=None ,A : Tuple=1 ,A : Any="binary" ,A : List[Any]=None ): __A = fa_score( A ,A ,labels=A ,pos_label=A ,average=A ,sample_weight=A ) return {"f1": float(A ) if score.size == 1 else score}
55
from __future__ import annotations SCREAMING_SNAKE_CASE_:Tuple = """#""" class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self ): A : dict = {} def _lowerCAmelCase ( self, lowerCamelCase__ ): A : List[Any] = self._trie for char in text: if char not in trie: A : str = {} A : str = trie[char] A : Optional[int] = True def _lowerCAmelCase ( self, lowerCamelCase__ ): A : Dict = self._trie for char in prefix: if char in trie: A : Optional[Any] = trie[char] else: return [] return self._elements(lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__ ): A : int = [] for c, v in d.items(): A : List[Any] = [""" """] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )] result.extend(lowerCamelCase__ ) return tuple(lowerCamelCase__ ) SCREAMING_SNAKE_CASE_:Any = Trie() SCREAMING_SNAKE_CASE_:Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""") for word in words: trie.insert_word(word) def __UpperCamelCase ( _lowerCAmelCase ) -> tuple: """simple docstring""" A : List[str] = trie.find_word(_lowerCAmelCase ) return tuple(string + word for word in suffixes ) def __UpperCamelCase ( ) -> None: """simple docstring""" print(autocomplete_using_trie("""de""" ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
662
0
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _lowercase ( unittest.TestCase ): def a ( self : Any ) -> Any: __snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) __snake_case = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(SCREAMING_SNAKE_CASE_ ) __snake_case = -1 __snake_case = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ ) __snake_case = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ ) __snake_case = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: __snake_case = TextStreamer(SCREAMING_SNAKE_CASE_ ) model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __snake_case = cs.out[:-1] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def a ( self : Optional[Any] ) -> Any: __snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) __snake_case = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(SCREAMING_SNAKE_CASE_ ) __snake_case = -1 __snake_case = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ ) __snake_case = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ ) __snake_case = tokenizer.decode(greedy_ids[0] ) __snake_case = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ ) __snake_case = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer} __snake_case = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE_ ) thread.start() __snake_case = '' for new_text in streamer: streamer_text += new_text self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def a ( self : Optional[int] ) -> List[str]: __snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) __snake_case = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(SCREAMING_SNAKE_CASE_ ) __snake_case = -1 __snake_case = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ ) __snake_case = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ ) __snake_case = greedy_ids[:, input_ids.shape[1] :] __snake_case = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: __snake_case = TextStreamer(SCREAMING_SNAKE_CASE_ , skip_prompt=SCREAMING_SNAKE_CASE_ ) model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __snake_case = cs.out[:-1] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def a ( self : List[Any] ) -> int: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them __snake_case = AutoTokenizer.from_pretrained('distilgpt2' ) __snake_case = AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(SCREAMING_SNAKE_CASE_ ) __snake_case = -1 __snake_case = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE_ ).long() * model.config.bos_token_id with CaptureStdout() as cs: __snake_case = TextStreamer(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token __snake_case = cs.out[:-1] # Remove the final "\n" __snake_case = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def a ( self : List[Any] ) -> Union[str, Any]: __snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) __snake_case = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(SCREAMING_SNAKE_CASE_ ) __snake_case = -1 __snake_case = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ ) __snake_case = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ , timeout=0.0_0_1 ) __snake_case = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer} __snake_case = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE_ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(SCREAMING_SNAKE_CASE_ ): __snake_case = '' for new_text in streamer: streamer_text += new_text
56
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> Union[str, Any]: """simple docstring""" A : Optional[int] = bnb_quantization_config.load_in_abit A : int = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) A : Any = [] # custom device map if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1: A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: A : int = get_keys_to_not_convert(_lowerCAmelCase ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_lowerCAmelCase ) A : Optional[Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: A : Dict = [] A : Tuple = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_lowerCAmelCase ) # compatibility with peft A : Union[str, Any] = load_in_abit A : Tuple = load_in_abit A : List[str] = get_parameter_device(_lowerCAmelCase ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. """ """The model should be instantiated under the `init_empty_weights` context manager.""" ) A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase ) # convert param to the right dtype A : Tuple = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_lowerCAmelCase ): param.to(_lowerCAmelCase ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): A : str = replace_with_bnb_layers( _lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase ) A : Optional[Any] = get_quantized_model_device_map( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): A : Tuple = True A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase ) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[int]: """simple docstring""" if device_map is None: if torch.cuda.is_available(): A : Optional[int] = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) A : Tuple = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) A : Any = {} A : List[str] = special_dtypes A : Any = no_split_module_classes A : Union[str, Any] = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": A : Tuple = get_balanced_memory( _lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , ) A : int = max_memory A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ): # check if don't have any quantized module on the cpu A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules A : Optional[int] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]: """simple docstring""" if modules_to_not_convert is None: A : Optional[Any] = [] A , A : Dict = _replace_with_bnb_layers( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> int: """simple docstring""" A : Optional[int] = False for name, module in model.named_children(): if current_key_name is None: A : int = [] current_key_name.append(_lowerCAmelCase ) if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` A : Dict = """.""".join(_lowerCAmelCase ) A : Optional[Any] = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: A : Dict = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: A : Optional[Any] = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: A : Dict = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) A : Any = module.weight.data if module.bias is not None: A : Any = module.bias.data bnb_module.requires_grad_(_lowerCAmelCase ) setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) A : Dict = True if len(list(module.children() ) ) > 0: A , A : Dict = _replace_with_bnb_layers( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) A : Union[str, Any] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def __UpperCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: """simple docstring""" with init_empty_weights(): A : Tuple = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` A : Optional[int] = find_tied_parameters(_lowerCAmelCase ) # For compatibility with Accelerate < 0.18 if isinstance(_lowerCAmelCase , _lowerCAmelCase ): A : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A : Optional[int] = sum(_lowerCAmelCase , [] ) A : Tuple = len(_lowerCAmelCase ) > 0 # Check if it is a base model A : List[str] = False if hasattr(_lowerCAmelCase , """base_model_prefix""" ): A : Optional[Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A : str = list(model.named_children() ) A : Tuple = [list_modules[-1][0]] # add last module together with tied weights A : int = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) A : Optional[Any] = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase ) # remove ".weight" from the keys A : Union[str, Any] = [""".weight""", """.bias"""] A : Optional[int] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A : List[str] = name.replace(_lowerCAmelCase , """""" ) filtered_module_names.append(_lowerCAmelCase ) return filtered_module_names def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]: """simple docstring""" for m in model.modules(): if isinstance(_lowerCAmelCase , bnb.nn.Linearabit ): return True return False def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]: """simple docstring""" return next(parameter.parameters() ).device def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase ) A : Tuple = param_name A : Union[str, Any] = model if "." in tensor_name: A : int = tensor_name.split(""".""" ) for split in splits[:-1]: A : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) A : Optional[Any] = new_module A : List[str] = splits[-1] # offload weights A : Optional[int] = False offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase , ) else: offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase ) offload_weight(_lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase ) set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , """meta""" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
662
0
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class _lowerCAmelCase( unittest.TestCase ): """simple docstring""" def _a ( self ): UpperCamelCase_: Union[str, Any] = 'ylacombe/bark-small' UpperCamelCase_: Optional[int] = tempfile.mkdtemp() UpperCamelCase_: Dict = 'en_speaker_1' UpperCamelCase_: List[Any] = 'This is a test string' UpperCamelCase_: Tuple = 'speaker_embeddings_path.json' UpperCamelCase_: Tuple = 'speaker_embeddings' def _a ( self , **_lowerCamelCase ): return AutoTokenizer.from_pretrained(self.checkpoint , **_lowerCamelCase ) def _a ( self ): shutil.rmtree(self.tmpdirname ) def _a ( self ): UpperCamelCase_: int = self.get_tokenizer() UpperCamelCase_: Dict = BarkProcessor(tokenizer=_lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase_: Optional[Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def _a ( self ): UpperCamelCase_: Dict = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) UpperCamelCase_: Optional[int] = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def _a ( self ): UpperCamelCase_: Union[str, Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) UpperCamelCase_: int = 3_5 UpperCamelCase_: Optional[int] = 2 UpperCamelCase_: int = 8 UpperCamelCase_: Union[str, Any] = { 'semantic_prompt': np.ones(_lowerCamelCase ), 'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ), 'fine_prompt': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset UpperCamelCase_: Dict = processor(text=self.input_string , voice_preset=_lowerCamelCase ) UpperCamelCase_: str = inputs['history_prompt'] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCamelCase , np.array([] ) ).tolist() ) # test loading voice preset from npz file UpperCamelCase_: Tuple = os.path.join(self.tmpdirname , 'file.npz' ) np.savez(_lowerCamelCase , **_lowerCamelCase ) UpperCamelCase_: List[Any] = processor(text=self.input_string , voice_preset=_lowerCamelCase ) UpperCamelCase_: Tuple = inputs['history_prompt'] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCamelCase , np.array([] ) ).tolist() ) # test loading voice preset from the hub UpperCamelCase_: int = processor(text=self.input_string , voice_preset=self.voice_preset ) def _a ( self ): UpperCamelCase_: Tuple = self.get_tokenizer() UpperCamelCase_: str = BarkProcessor(tokenizer=_lowerCamelCase ) UpperCamelCase_: List[str] = processor(text=self.input_string ) UpperCamelCase_: List[Any] = tokenizer( self.input_string , padding='max_length' , max_length=2_5_6 , add_special_tokens=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
57
from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def __UpperCamelCase ( ) -> Dict: """simple docstring""" A : Tuple = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" ) A : Dict = parser.add_subparsers(help="""transformers-cli command helpers""" ) # Register commands ConvertCommand.register_subcommand(_lowerCAmelCase ) DownloadCommand.register_subcommand(_lowerCAmelCase ) EnvironmentCommand.register_subcommand(_lowerCAmelCase ) RunCommand.register_subcommand(_lowerCAmelCase ) ServeCommand.register_subcommand(_lowerCAmelCase ) UserCommands.register_subcommand(_lowerCAmelCase ) AddNewModelCommand.register_subcommand(_lowerCAmelCase ) AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase ) LfsCommands.register_subcommand(_lowerCAmelCase ) PTtoTFCommand.register_subcommand(_lowerCAmelCase ) # Let's go A : Tuple = parser.parse_args() if not hasattr(_lowerCAmelCase , """func""" ): parser.print_help() exit(1 ) # Run A : Any = args.func(_lowerCAmelCase ) service.run() if __name__ == "__main__": main()
662
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Tuple: '''simple docstring''' requires_backends(cls , ["""torch"""] ) def __lowerCAmelCase ( *__UpperCamelCase : str , **__UpperCamelCase : Union[str, Any] ): '''simple docstring''' requires_backends(__UpperCamelCase , ["""torch"""] ) def __lowerCAmelCase ( *__UpperCamelCase : Any , **__UpperCamelCase : str ): '''simple docstring''' requires_backends(__UpperCamelCase , ["""torch"""] ) def __lowerCAmelCase ( *__UpperCamelCase : Tuple , **__UpperCamelCase : Optional[int] ): '''simple docstring''' requires_backends(__UpperCamelCase , ["""torch"""] ) def __lowerCAmelCase ( *__UpperCamelCase : List[str] , **__UpperCamelCase : List[Any] ): '''simple docstring''' requires_backends(__UpperCamelCase , ["""torch"""] ) def __lowerCAmelCase ( *__UpperCamelCase : List[Any] , **__UpperCamelCase : Optional[Any] ): '''simple docstring''' requires_backends(__UpperCamelCase , ["""torch"""] ) def __lowerCAmelCase ( *__UpperCamelCase : List[Any] , **__UpperCamelCase : int ): '''simple docstring''' requires_backends(__UpperCamelCase , ["""torch"""] ) def __lowerCAmelCase ( *__UpperCamelCase : List[str] , **__UpperCamelCase : List[Any] ): '''simple docstring''' requires_backends(__UpperCamelCase , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Any: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Any: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Dict: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Any: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Any: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Any: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Any: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Any: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Any: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Tuple: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Dict: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Any: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Tuple: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Dict: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Any: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Tuple: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Dict: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> List[str]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> int: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict: '''simple docstring''' requires_backends(cls , ["""torch"""] ) class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''torch'''] def __init__( self , *_lowercase , **_lowercase ) -> str: '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Tuple: '''simple docstring''' requires_backends(cls , ["""torch"""] )
58
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE_:int = { """configuration_blenderbot""": [ """BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlenderbotConfig""", """BlenderbotOnnxConfig""", ], """tokenization_blenderbot""": ["""BlenderbotTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Optional[int] = [ """BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlenderbotForCausalLM""", """BlenderbotForConditionalGeneration""", """BlenderbotModel""", """BlenderbotPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Union[str, Any] = [ """TFBlenderbotForConditionalGeneration""", """TFBlenderbotModel""", """TFBlenderbotPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Any = [ """FlaxBlenderbotForConditionalGeneration""", """FlaxBlenderbotModel""", """FlaxBlenderbotPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
662
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __A = { "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"], "convert_funnel_original_tf_checkpoint_to_pytorch": [], "tokenization_funnel": ["FunnelTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["FunnelTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "FunnelBaseModel", "FunnelForMaskedLM", "FunnelForMultipleChoice", "FunnelForPreTraining", "FunnelForQuestionAnswering", "FunnelForSequenceClassification", "FunnelForTokenClassification", "FunnelModel", "FunnelPreTrainedModel", "load_tf_weights_in_funnel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFFunnelBaseModel", "TFFunnelForMaskedLM", "TFFunnelForMultipleChoice", "TFFunnelForPreTraining", "TFFunnelForQuestionAnswering", "TFFunnelForSequenceClassification", "TFFunnelForTokenClassification", "TFFunnelModel", "TFFunnelPreTrainedModel", ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
59
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list[int]: """simple docstring""" A : Optional[int] = int(_lowerCAmelCase ) # Initialize Result A : int = [] # Traverse through all denomination for denomination in reversed(_lowerCAmelCase ): # Find denominations while int(_lowerCAmelCase ) >= int(_lowerCAmelCase ): total_value -= int(_lowerCAmelCase ) answer.append(_lowerCAmelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": SCREAMING_SNAKE_CASE_:List[Any] = [] SCREAMING_SNAKE_CASE_:Dict = """0""" if ( input("""Do you want to enter your denominations ? (yY/n): """).strip().lower() == "y" ): SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the number of denominations you want to add: """).strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip() else: # All denominations of Indian Currency if user does not enter SCREAMING_SNAKE_CASE_:Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2_000] SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make: """).strip() if int(value) == 0 or int(value) < 0: print("""The total value cannot be zero or negative.""") else: print(F"""Following is minimal change for {value}: """) SCREAMING_SNAKE_CASE_:str = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=""" """)
662
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
60
# This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru""" # Build # borrowed from a test SCREAMING_SNAKE_CASE_:Union[str, Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] SCREAMING_SNAKE_CASE_:Any = dict(zip(vocab, range(len(vocab)))) SCREAMING_SNAKE_CASE_:Dict = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE_:List[Any] = Path(tmpdirname) SCREAMING_SNAKE_CASE_:str = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""] SCREAMING_SNAKE_CASE_:Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""] SCREAMING_SNAKE_CASE_:Any = build_dir / VOCAB_FILES_NAMES["""merges_file"""] with open(src_vocab_file, """w""") as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, """w""") as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, """w""") as fp: fp.write("""\n""".join(merges)) SCREAMING_SNAKE_CASE_:Optional[int] = FSMTTokenizer( langs=["""en""", """ru"""], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) SCREAMING_SNAKE_CASE_:Optional[int] = FSMTConfig( langs=["""ru""", """en"""], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) SCREAMING_SNAKE_CASE_:Optional[Any] = FSMTForConditionalGeneration(config) print(F"""num of params {tiny_model.num_parameters()}""") # Test SCREAMING_SNAKE_CASE_:Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""") SCREAMING_SNAKE_CASE_:str = tiny_model(**batch) print("""test output:""", len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F"""Generated {mname_tiny}""") # Upload # transformers-cli upload tiny-wmt19-en-ru
662
0
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class __lowerCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=13 , SCREAMING_SNAKE_CASE__ : int=7 , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=99 , SCREAMING_SNAKE_CASE__ : Any=32 , SCREAMING_SNAKE_CASE__ : List[str]=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=37 , SCREAMING_SNAKE_CASE__ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=512 , SCREAMING_SNAKE_CASE__ : Tuple=16 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : Dict=4 , ) -> Optional[int]: lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = seq_length lowerCAmelCase__ = is_training lowerCAmelCase__ = use_attention_mask lowerCAmelCase__ = use_token_type_ids lowerCAmelCase__ = use_labels lowerCAmelCase__ = vocab_size lowerCAmelCase__ = hidden_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = max_position_embeddings lowerCAmelCase__ = type_vocab_size lowerCAmelCase__ = type_sequence_label_size lowerCAmelCase__ = initializer_range lowerCAmelCase__ = num_choices def a ( self : Union[str, Any] ) -> Optional[int]: lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ = None if self.use_attention_mask: lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ = None if self.use_token_type_ids: lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def a ( self : List[str] ) -> Union[str, Any]: lowerCAmelCase__ = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def a ( self : Optional[Any] ) -> Dict: lowerCAmelCase__ = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs lowerCAmelCase__ = True lowerCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class __lowerCamelCase ( UpperCamelCase__ , unittest.TestCase ): """simple docstring""" snake_case__ = True snake_case__ = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def a ( self : int ) -> Dict: lowerCAmelCase__ = FlaxRobertaPreLayerNormModelTester(self ) @slow def a ( self : Tuple ) -> Union[str, Any]: for model_class_name in self.all_model_classes: lowerCAmelCase__ = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = model(np.ones((1, 1) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_flax class __lowerCamelCase ( unittest.TestCase ): """simple docstring""" @slow def a ( self : int ) -> Dict: lowerCAmelCase__ = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )[0] lowerCAmelCase__ = [1, 11, 50_265] self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE__ ) # compare the actual values for a slice. lowerCAmelCase__ = np.array( [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) ) @slow def a ( self : Union[str, Any] ) -> Optional[int]: lowerCAmelCase__ = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )[0] # compare the actual values for a slice. lowerCAmelCase__ = np.array( [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
61
import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("""0.12.2"""): raise Exception("""requires fairseq >= 0.12.2""") if version.parse(fairseq.__version__) > version.parse("""2"""): raise Exception("""requires fairseq < v2""") logging.set_verbosity_info() SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_:int = """Hello, World!""" SCREAMING_SNAKE_CASE_:List[Any] = """en_XX""" def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any: """simple docstring""" A : Optional[int] = Path("""data_bin""" ) A : Optional[Any] = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowerCAmelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , ) xmod.eval() # disable dropout print(_lowerCAmelCase ) A : Any = xmod.model.encoder.sentence_encoder A : Optional[int] = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0] print("""Our X-MOD config:""" , _lowerCAmelCase ) A : int = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase ) model.eval() # Now let's copy all the weights. # Embeddings A : Any = xmod_sent_encoder.embed_tokens.weight A : int = xmod_sent_encoder.embed_positions.weight A : str = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. A : Dict = xmod_sent_encoder.layernorm_embedding.weight A : int = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer A : str = model.roberta.encoder.layer[i] A : Tuple = xmod_sent_encoder.layers[i] # self attention A : Optional[int] = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError("""Dimensions of self-attention weights do not match.""" ) A : List[str] = xmod_layer.self_attn.q_proj.weight A : Optional[int] = xmod_layer.self_attn.q_proj.bias A : List[Any] = xmod_layer.self_attn.k_proj.weight A : Union[str, Any] = xmod_layer.self_attn.k_proj.bias A : Optional[int] = xmod_layer.self_attn.v_proj.weight A : Dict = xmod_layer.self_attn.v_proj.bias # self-attention output A : Optional[Any] = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError("""Dimensions of self-attention output weights do not match.""" ) A : Optional[Any] = xmod_layer.self_attn.out_proj.weight A : Dict = xmod_layer.self_attn.out_proj.bias A : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight A : str = xmod_layer.self_attn_layer_norm.bias # intermediate A : str = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("""Dimensions of intermediate weights do not match.""" ) A : Optional[int] = xmod_layer.fca.weight A : Optional[int] = xmod_layer.fca.bias # output A : Dict = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("""Dimensions of feed-forward weights do not match.""" ) A : Union[str, Any] = xmod_layer.fca.weight A : int = xmod_layer.fca.bias A : List[str] = xmod_layer.final_layer_norm.weight A : Optional[Any] = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: A : str = xmod_layer.adapter_layer_norm.weight A : str = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError("""Lists of language adapters do not match.""" ) for lang_code, adapter in xmod_layer.adapter_modules.items(): A : Optional[int] = bert_output.adapter_modules[lang_code] A : int = xmod_layer.adapter_modules[lang_code] A : Optional[Any] = from_adapter.fca.weight A : Optional[Any] = from_adapter.fca.bias A : List[str] = from_adapter.fca.weight A : Any = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: A : Dict = xmod_sent_encoder.layer_norm.weight A : int = xmod_sent_encoder.layer_norm.bias if classification_head: A : int = xmod.model.classification_heads["""mnli"""].dense.weight A : Optional[Any] = xmod.model.classification_heads["""mnli"""].dense.bias A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.bias else: # LM Head A : Any = xmod.model.encoder.lm_head.dense.weight A : Tuple = xmod.model.encoder.lm_head.dense.bias A : Any = xmod.model.encoder.lm_head.layer_norm.weight A : List[str] = xmod.model.encoder.lm_head.layer_norm.bias A : Union[str, Any] = xmod.model.encoder.lm_head.weight A : Tuple = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. A : Optional[int] = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(_lowerCAmelCase ) A : List[str] = model(_lowerCAmelCase )[0] if classification_head: A : Dict = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowerCAmelCase ) ) else: A : Optional[Any] = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) A : str = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 A : Optional[Any] = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" ) if not success: raise Exception("""Something went wRoNg""" ) Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_:Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head.""" ) SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
662
0
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: snake_case = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : str=18 , UpperCAmelCase_ : str=30 , UpperCAmelCase_ : Optional[int]=400 , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Any=None , ): SCREAMING_SNAKE_CASE : Any = size if size is not None else {"height": 20, "width": 20} SCREAMING_SNAKE_CASE : Optional[int] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : Optional[int] = num_channels SCREAMING_SNAKE_CASE : List[str] = image_size SCREAMING_SNAKE_CASE : Optional[int] = min_resolution SCREAMING_SNAKE_CASE : Union[str, Any] = max_resolution SCREAMING_SNAKE_CASE : List[Any] = size SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize SCREAMING_SNAKE_CASE : Any = do_convert_rgb SCREAMING_SNAKE_CASE : Any = [512, 1024, 2048, 4096] SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size if patch_size is not None else {"height": 16, "width": 16} def _A ( self : List[str] ): return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def _A ( self : List[Any] ): SCREAMING_SNAKE_CASE : Optional[Any] = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : List[Any] = PixaStructImageProcessor if is_vision_available() else None def _A ( self : int ): SCREAMING_SNAKE_CASE : Optional[int] = PixaStructImageProcessingTester(self ) @property def _A ( self : List[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def _A ( self : List[Any] ): SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , "do_convert_rgb" ) ) def _A ( self : Tuple ): SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.prepare_dummy_image() SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) SCREAMING_SNAKE_CASE : Optional[Any] = 2048 SCREAMING_SNAKE_CASE : Tuple = image_processor(UpperCAmelCase_ , return_tensors="pt" , max_patches=UpperCAmelCase_ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_606 ) , atol=1E-3 , rtol=1E-3 ) ) def _A ( self : str ): # Initialize image_processor SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[int] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input SCREAMING_SNAKE_CASE : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=UpperCAmelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processor( UpperCAmelCase_ , return_tensors="pt" , max_patches=UpperCAmelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _A ( self : List[str] ): # Initialize image_processor SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : List[Any] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 SCREAMING_SNAKE_CASE : Optional[Any] = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : str = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=UpperCAmelCase_ ).flattened_patches SCREAMING_SNAKE_CASE : Tuple = "Hello" SCREAMING_SNAKE_CASE : Tuple = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=UpperCAmelCase_ , header_text=UpperCAmelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE : Tuple = image_processor( UpperCAmelCase_ , return_tensors="pt" , max_patches=UpperCAmelCase_ , header_text=UpperCAmelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _A ( self : Union[str, Any] ): # Initialize image_processor SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) SCREAMING_SNAKE_CASE : Optional[Any] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input SCREAMING_SNAKE_CASE : Tuple = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=UpperCAmelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE : Tuple = image_processor( UpperCAmelCase_ , return_tensors="pt" , max_patches=UpperCAmelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _A ( self : int ): # Initialize image_processor SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input SCREAMING_SNAKE_CASE : str = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=UpperCAmelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE : List[Any] = image_processor( UpperCAmelCase_ , return_tensors="pt" , max_patches=UpperCAmelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Any = PixaStructImageProcessor if is_vision_available() else None def _A ( self : Any ): SCREAMING_SNAKE_CASE : str = PixaStructImageProcessingTester(self , num_channels=4 ) SCREAMING_SNAKE_CASE : Union[str, Any] = 3 @property def _A ( self : List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def _A ( self : Any ): SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , "do_convert_rgb" ) ) def _A ( self : Any ): # Initialize image_processor SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : Tuple = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input SCREAMING_SNAKE_CASE : Optional[int] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=UpperCAmelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched SCREAMING_SNAKE_CASE : List[str] = image_processor( UpperCAmelCase_ , return_tensors="pt" , max_patches=UpperCAmelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
62
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ): A : Any = tempfile.mkdtemp() A : List[str] = BlipImageProcessor() A : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) A : str = BlipProcessor(lowerCamelCase__, lowerCamelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowerCAmelCase ( self, **lowerCamelCase__ ): return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).tokenizer def _lowerCAmelCase ( self, **lowerCamelCase__ ): return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).image_processor def _lowerCAmelCase ( self ): shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self ): A : Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )] A : Any = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs] return image_inputs def _lowerCAmelCase ( self ): A : int = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A : Any = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" ) A : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase__, padding_value=1.0 ) A : Dict = BlipProcessor.from_pretrained( self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer, lowerCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : Optional[int] = self.get_image_processor() A : str = self.get_tokenizer() A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Any = self.prepare_image_inputs() A : int = image_processor(lowerCamelCase__, return_tensors="""np""" ) A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 ) def _lowerCAmelCase ( self ): A : List[str] = self.get_image_processor() A : int = self.get_tokenizer() A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[Any] = """lower newer""" A : List[Any] = processor(text=lowerCamelCase__ ) A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key] ) def _lowerCAmelCase ( self ): A : List[Any] = self.get_image_processor() A : Dict = self.get_tokenizer() A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[int] = """lower newer""" A : Union[str, Any] = self.prepare_image_inputs() A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ ) self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(lowerCamelCase__ ): processor() def _lowerCAmelCase ( self ): A : List[Any] = self.get_image_processor() A : Dict = self.get_tokenizer() A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A : Optional[int] = processor.batch_decode(lowerCamelCase__ ) A : Dict = tokenizer.batch_decode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : Optional[int] = self.get_image_processor() A : int = self.get_tokenizer() A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[int] = """lower newer""" A : List[str] = self.prepare_image_inputs() A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
662
0
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=lowercase__ ) class a ( lowercase__ ): """simple docstring""" a : str = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} ) a : ClassVar[Features] = Features({'question': Value('string' ), 'context': Value('string' )} ) a : ClassVar[Features] = Features( { 'answers': Sequence( { 'text': Value('string' ), 'answer_start': Value('int32' ), } ) } ) a : str = "question" a : str = "context" a : str = "answers" @property def UpperCAmelCase ( self : Union[str, Any] ) -> Dict[str, str]: return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
63
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ): return f'''gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase__ ) for s in shape] )}.npy''' def _lowerCAmelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 4, 64, 64), lowerCamelCase__=False ): A : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa A : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ ) return image def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__="CompVis/stable-diffusion-v1-4" ): A : str = jnp.bfloataa if fpaa else jnp.floataa A : Union[str, Any] = """bf16""" if fpaa else None A , A : str = FlaxUNetaDConditionModel.from_pretrained( lowerCamelCase__, subfolder="""unet""", dtype=lowerCamelCase__, revision=lowerCamelCase__ ) return model, params def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 77, 768), lowerCamelCase__=False ): A : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa A : List[str] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ): A , A : List[str] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=lowerCamelCase__ ) A : str = self.get_latents(lowerCamelCase__, fpaa=lowerCamelCase__ ) A : int = self.get_encoder_hidden_states(lowerCamelCase__, fpaa=lowerCamelCase__ ) A : Optional[Any] = model.apply( {"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample assert sample.shape == latents.shape A : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa ) A : Dict = jnp.array(lowerCamelCase__, dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ): A , A : Tuple = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=lowerCamelCase__ ) A : int = self.get_latents(lowerCamelCase__, shape=(4, 4, 96, 96), fpaa=lowerCamelCase__ ) A : Union[str, Any] = self.get_encoder_hidden_states(lowerCamelCase__, shape=(4, 77, 1024), fpaa=lowerCamelCase__ ) A : Dict = model.apply( {"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample assert sample.shape == latents.shape A : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa ) A : List[Any] = jnp.array(lowerCamelCase__, dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
662
0
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase_ : Optional[Any] = logging.get_logger(__name__) lowercase_ : str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} lowercase_ : str = { 'vocab_file': { 'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json', 'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json', 'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json', 'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json', 'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json', }, 'merges_file': { 'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt', 'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt', 'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt', 'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt', 'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt', }, 'tokenizer_file': { 'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json', 'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json', 'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json', 'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json', 'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json', }, } lowercase_ : int = { 'gpt2': 1_0_2_4, 'gpt2-medium': 1_0_2_4, 'gpt2-large': 1_0_2_4, 'gpt2-xl': 1_0_2_4, 'distilgpt2': 1_0_2_4, } class _lowerCamelCase ( UpperCamelCase_ ): __a = VOCAB_FILES_NAMES __a = PRETRAINED_VOCAB_FILES_MAP __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = ["input_ids", "attention_mask"] __a = GPTaTokenizer def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="<|endoftext|>" , lowerCAmelCase="<|endoftext|>" , lowerCAmelCase="<|endoftext|>" , lowerCAmelCase=False , **lowerCAmelCase , ) -> Union[str, Any]: super().__init__( lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , **lowerCAmelCase , ) SCREAMING_SNAKE_CASE__: Dict= kwargs.pop('''add_bos_token''' , lowerCAmelCase ) SCREAMING_SNAKE_CASE__: List[Any]= json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , lowerCAmelCase ) != add_prefix_space: SCREAMING_SNAKE_CASE__: Optional[Any]= getattr(lowerCAmelCase , pre_tok_state.pop('''type''' ) ) SCREAMING_SNAKE_CASE__: Optional[Any]= add_prefix_space SCREAMING_SNAKE_CASE__: Any= pre_tok_class(**lowerCAmelCase ) SCREAMING_SNAKE_CASE__: List[Any]= add_prefix_space def UpperCamelCase_ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> BatchEncoding: SCREAMING_SNAKE_CASE__: int= kwargs.get('''is_split_into_words''' , lowerCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase ) def UpperCamelCase_ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> BatchEncoding: SCREAMING_SNAKE_CASE__: Optional[int]= kwargs.get('''is_split_into_words''' , lowerCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase ) def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> Tuple[str]: SCREAMING_SNAKE_CASE__: str= self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase ) return tuple(lowerCAmelCase ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> List[int]: SCREAMING_SNAKE_CASE__: List[str]= [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) + [self.eos_token_id] ) if len(lowerCAmelCase ) > self.model_max_length: SCREAMING_SNAKE_CASE__: Dict= input_ids[-self.model_max_length :] return input_ids
64
from typing import Any import numpy as np def __UpperCamelCase ( _lowerCAmelCase ) -> bool: """simple docstring""" return np.array_equal(_lowerCAmelCase , matrix.conjugate().T ) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: """simple docstring""" A : Any = v.conjugate().T A : List[Any] = v_star.dot(_lowerCAmelCase ) assert isinstance(_lowerCAmelCase , np.ndarray ) return (v_star_dot.dot(_lowerCAmelCase )) / (v_star.dot(_lowerCAmelCase )) def __UpperCamelCase ( ) -> None: """simple docstring""" A : Any = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) A : str = np.array([[1], [2], [3]] ) assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.''' print(rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) ) A : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.''' assert rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
662
0
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' if not isinstance(__UpperCamelCase , __UpperCamelCase ): raise ValueError("""Input must be an integer""" ) if input_num <= 0: raise ValueError("""Input must be positive""" ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
65
from __future__ import annotations import numpy as np def __UpperCamelCase ( _lowerCAmelCase ) -> tuple[np.ndarray, np.ndarray]: """simple docstring""" A , A : int = np.shape(_lowerCAmelCase ) if rows != columns: A : Union[str, Any] = ( """'table' has to be of square shaped array but got a """ f'''{rows}x{columns} array:\n{table}''' ) raise ValueError(_lowerCAmelCase ) A : Union[str, Any] = np.zeros((rows, columns) ) A : Dict = np.zeros((rows, columns) ) for i in range(_lowerCAmelCase ): for j in range(_lowerCAmelCase ): A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) ) if upper[j][j] == 0: raise ArithmeticError("""No LU decomposition exists""" ) A : Any = (table[i][j] - total) / upper[j][j] A : Union[str, Any] = 1 for j in range(_lowerCAmelCase , _lowerCAmelCase ): A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) ) A : str = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
662
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available UpperCamelCase = { "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", "GPTNeoForTokenClassification", "GPTNeoModel", "GPTNeoPreTrainedModel", "load_tf_weights_in_gpt_neo", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple[int, int]: """simple docstring""" def constraint_to_multiple_of(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0 , _lowerCAmelCase=None ): A : Optional[int] = round(val / multiple ) * multiple if max_val is not None and x > max_val: A : Optional[Any] = math.floor(val / multiple ) * multiple if x < min_val: A : Any = math.ceil(val / multiple ) * multiple return x A : Optional[Any] = (output_size, output_size) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else output_size A , A : List[Any] = get_image_size(_lowerCAmelCase ) A , A : List[Any] = output_size # determine new height and width A : Optional[int] = output_height / input_height A : Optional[Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width A : Any = scale_width else: # fit height A : int = scale_height A : Any = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCAmelCase ) A : int = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCAmelCase ) return (new_height, new_width) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : Optional[int] = ["pixel_values"] def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) A : int = size if size is not None else {"""height""": 384, """width""": 384} A : str = get_size_dict(lowerCamelCase__ ) A : Optional[Any] = do_resize A : Optional[int] = size A : Union[str, Any] = keep_aspect_ratio A : int = ensure_multiple_of A : Dict = resample A : Optional[Any] = do_rescale A : Any = rescale_factor A : str = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ): A : Dict = get_size_dict(lowerCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) A : Optional[Any] = get_resize_output_image_size( lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, ) return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ): A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize A : str = size if size is not None else self.size A : str = get_size_dict(lowerCamelCase__ ) A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of A : Tuple = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : int = rescale_factor if rescale_factor is not None else self.rescale_factor A : int = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : Optional[int] = image_std if image_std is not None else self.image_std A : Any = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. A : str = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images] if do_rescale: A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images] if do_normalize: A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images] A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images] A : Optional[int] = {"""pixel_values""": images} return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ): A : Any = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCamelCase__ ) != len(lowerCamelCase__ ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(lowerCamelCase__ ): A : int = target_sizes.numpy() A : Union[str, Any] = [] for idx in range(len(lowerCamelCase__ ) ): A : int = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ ) A : Tuple = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCamelCase__ ) else: A : List[str] = logits.argmax(dim=1 ) A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
662
0
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int = 50 ) -> int: _lowercase = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F"""{solution() = }""")
67
class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self, lowerCamelCase__ ): # we need a list not a string, so do something to change the type A : List[Any] = arr.split(""",""" ) def _lowerCAmelCase ( self ): A : int = [int(self.array[0] )] * len(self.array ) A : Optional[Any] = [int(self.array[0] )] * len(self.array ) for i in range(1, len(self.array ) ): A : Union[str, Any] = max( int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) ) A : Dict = max(sum_value[i], rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": SCREAMING_SNAKE_CASE_:int = input("""please input some numbers:""") SCREAMING_SNAKE_CASE_:Dict = SubArray(whole_array) SCREAMING_SNAKE_CASE_:Optional[int] = array.solve_sub_array() print(("""the results is:""", re))
662
0
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A = logging.get_logger(__name__) __A = {"vocab_file": "spiece.model"} __A = { "vocab_file": { "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model", } } class _A ( UpperCamelCase ): """simple docstring""" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : Dict="<s>" , __SCREAMING_SNAKE_CASE : Optional[Any]="</s>" , __SCREAMING_SNAKE_CASE : str="<unk>" , __SCREAMING_SNAKE_CASE : str="<sep>" , __SCREAMING_SNAKE_CASE : Any="<pad>" , __SCREAMING_SNAKE_CASE : Tuple="<cls>" , __SCREAMING_SNAKE_CASE : Optional[int]="<mask>" , __SCREAMING_SNAKE_CASE : Tuple=["<eop>", "<eod>"] , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> None: __UpperCAmelCase =AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token __UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) __UpperCAmelCase =3 __UpperCAmelCase =do_lower_case __UpperCAmelCase =remove_space __UpperCAmelCase =keep_accents __UpperCAmelCase =vocab_file __UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """ """See https://pypi.org/project/jieba/ for installation.""" ) __UpperCAmelCase =jieba __UpperCAmelCase =str.maketrans(""" \n""" , """\u2582\u2583""" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _a ( self : Optional[Any] ) -> List[Any]: return len(self.sp_model ) def _a ( self : Optional[int] ) -> List[Any]: __UpperCAmelCase ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ) -> List[str]: __UpperCAmelCase =self.__dict__.copy() __UpperCAmelCase =None return state def __setstate__( self : str , __SCREAMING_SNAKE_CASE : Tuple ) -> str: __UpperCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __UpperCAmelCase ={} __UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any ) -> Tuple: if self.remove_space: __UpperCAmelCase =""" """.join(inputs.strip().split() ) else: __UpperCAmelCase =inputs __UpperCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: __UpperCAmelCase =unicodedata.normalize("""NFKD""" , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__SCREAMING_SNAKE_CASE )] ) if self.do_lower_case: __UpperCAmelCase =outputs.lower() return outputs def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> List[str]: __UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =[] for piece in pieces: if len(__SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): __UpperCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__SCREAMING_SNAKE_CASE , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: __UpperCAmelCase =cur_pieces[1:] else: __UpperCAmelCase =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__SCREAMING_SNAKE_CASE ) else: new_pieces.append(__SCREAMING_SNAKE_CASE ) return new_pieces def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> str: return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def _a ( self : str , __SCREAMING_SNAKE_CASE : int ) -> Any: return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]: __UpperCAmelCase ="""""".join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , """ """ ).strip() return out_string def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: __UpperCAmelCase =[self.sep_token_id] __UpperCAmelCase =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _a ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE ) if token_ids_a is not None: return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: __UpperCAmelCase =[self.sep_token_id] __UpperCAmelCase =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase =os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi: __UpperCAmelCase =self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def _a ( self : List[Any] , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Dict ) -> Dict: __UpperCAmelCase =super()._decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) return text
68
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_:List[Any] = { """google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : Optional[Any] = "bit" __lowerCamelCase : Union[str, Any] = ["preactivation", "bottleneck"] __lowerCamelCase : Union[str, Any] = ["SAME", "VALID"] def __init__( self, lowerCamelCase__=3, lowerCamelCase__=64, lowerCamelCase__=[256, 512, 1024, 2048], lowerCamelCase__=[3, 4, 6, 3], lowerCamelCase__="preactivation", lowerCamelCase__="relu", lowerCamelCase__=None, lowerCamelCase__=32, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=1, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) if layer_type not in self.layer_types: raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' ) if global_padding is not None: if global_padding.upper() in self.supported_padding: A : List[Any] = global_padding.upper() else: raise ValueError(f'''Padding strategy {global_padding} not supported''' ) A : Dict = num_channels A : List[Any] = embedding_size A : Optional[Any] = hidden_sizes A : str = depths A : str = layer_type A : Union[str, Any] = hidden_act A : Any = global_padding A : Optional[int] = num_groups A : Dict = drop_path_rate A : List[Any] = embedding_dynamic_padding A : List[Any] = output_stride A : Union[str, Any] = width_factor A : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )] A , A : Any = get_aligned_output_features_output_indices( out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
662
0
'''simple docstring''' def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int: assert ( isinstance(_UpperCAmelCase , _UpperCAmelCase ) and number_of_steps > 0 ), F'''number_of_steps needs to be positive integer, your input {number_of_steps}''' if number_of_steps == 1: return 1 __snake_case , __snake_case = 1, 1 for _ in range(number_of_steps - 1 ): __snake_case , __snake_case = current + previous, current return current if __name__ == "__main__": import doctest doctest.testmod()
69
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=50, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=None, ): A : List[str] = parent A : List[str] = batch_size A : Optional[int] = seq_length A : Optional[int] = is_training A : Tuple = use_input_mask A : Optional[Any] = vocab_size A : str = hidden_size A : Any = num_hidden_layers A : List[Any] = num_attention_heads A : Optional[int] = intermediate_size A : int = hidden_act A : Dict = hidden_dropout_prob A : Optional[Any] = attention_probs_dropout_prob A : List[Any] = max_position_embeddings A : int = initializer_range A : Tuple = use_labels A : List[str] = scope def _lowerCAmelCase ( self ): A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) A : int = None if self.use_input_mask: A : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) A : List[Any] = self.get_config() return config, input_ids, input_mask, token_labels def _lowerCAmelCase ( self ): return BertGenerationConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, ) def _lowerCAmelCase ( self ): ( ( A ) , ( A ) , ( A ) , ( A ) , ) : List[Any] = self.prepare_config_and_inputs() A : Any = True A : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ): A : str = BertGenerationEncoder(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ ) A : List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ): A : List[str] = True A : Union[str, Any] = BertGenerationEncoder(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A : Any = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, ) A : Optional[Any] = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ): A : Union[str, Any] = True A : Optional[int] = True A : Optional[int] = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval() # first forward pass A : int = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__, ) A : List[str] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size ) A : int = ids_tensor((self.batch_size, 3), vocab_size=2 ) # append to next input_ids and A : List[str] = torch.cat([input_ids, next_tokens], dim=-1 ) A : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1 ) A : List[str] = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0] A : Any = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0] # select random slice A : Any = ids_tensor((1,), output_from_past.shape[-1] ).item() A : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() A : Dict = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, ): A : Optional[int] = BertGenerationDecoder(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A : List[str] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCAmelCase ( self ): A , A , A , A : str = self.prepare_config_and_inputs() A : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () __lowerCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else () __lowerCamelCase : List[Any] = ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() else {} ) def _lowerCAmelCase ( self ): A : Any = BertGenerationEncoderTester(self ) A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 ) def _lowerCAmelCase ( self ): self.config_tester.run_common_tests() def _lowerCAmelCase ( self ): A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _lowerCAmelCase ( self ): A , A , A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs() A : Any = """bert""" self.model_tester.create_and_check_model(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ ) def _lowerCAmelCase ( self ): # This regression test was failing with PyTorch < 1.3 ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() A : int = None self.model_tester.create_and_check_model_as_decoder( lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ) def _lowerCAmelCase ( self ): A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ ) @slow def _lowerCAmelCase ( self ): A : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) self.assertIsNotNone(lowerCamelCase__ ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): A : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) A : Optional[int] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): A : Union[str, Any] = model(lowerCamelCase__ )[0] A : List[Any] = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape, lowerCamelCase__ ) A : Tuple = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): A : Optional[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) A : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): A : Dict = model(lowerCamelCase__ )[0] A : List[str] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape, lowerCamelCase__ ) A : Optional[Any] = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
662
0
def _SCREAMING_SNAKE_CASE ( lowercase : float , lowercase : int ): '''simple docstring''' if digit_amount > 0: return round(number - int(lowercase ) , lowercase ) return number - int(lowercase ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
70
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : str = ["pixel_values"] def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384} A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ ) A : Optional[Any] = do_resize A : Dict = size # Default value set here for backwards compatibility where the value in config is None A : Dict = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : List[str] = do_rescale A : Tuple = rescale_factor A : Optional[int] = do_normalize A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ): A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ ) if "shortest_edge" not in size: raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' ) A : List[str] = size["""shortest_edge"""] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : int = int(shortest_edge / crop_pct ) A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ ) A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ ) else: # warping (no cropping) when evaluated at 384 or larger return resize( lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ): A : Dict = do_resize if do_resize is not None else self.do_resize A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct A : str = resample if resample is not None else self.resample A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor A : Dict = do_normalize if do_normalize is not None else self.do_normalize A : List[str] = image_mean if image_mean is not None else self.image_mean A : Optional[Any] = image_std if image_std is not None else self.image_std A : Optional[Any] = size if size is not None else self.size A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ ) A : Any = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("""crop_pct must be specified if size < 384.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images] if do_rescale: A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images] if do_normalize: A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images] A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images] A : Dict = {"""pixel_values""": images} return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
662
0
'''simple docstring''' import json import sys def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int ) -> Tuple: """simple docstring""" with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f: UpperCAmelCase_ : Dict = json.load(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = ["<details>", "<summary>Show updated benchmarks!</summary>", " "] for benchmark_name in sorted(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Optional[Any] = results[benchmark_name] UpperCAmelCase_ : Any = benchmark_name.split("/" )[-1] output_md.append(F'''### Benchmark: {benchmark_file_name}''' ) UpperCAmelCase_ : Any = "| metric |" UpperCAmelCase_ : Any = "|--------|" UpperCAmelCase_ : Union[str, Any] = "| new / old (diff) |" for metric_name in sorted(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Tuple = benchmark_res[metric_name] UpperCAmelCase_ : Union[str, Any] = metric_vals["new"] UpperCAmelCase_ : Optional[Any] = metric_vals.get("old" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = metric_vals.get("diff" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = F''' {new_val:f}''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" if old_val is not None: val_str += F''' / {old_val:f}''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" if dif_val is not None: val_str += F''' ({dif_val:f})''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append("</details>" ) with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f: f.writelines("\n".join(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": _lowerCamelCase = sys.argv[1] _lowerCamelCase = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
71
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any: """simple docstring""" A : Dict = """backbone.""" if is_semantic else """""" A : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (f'''{prefix}cls_token''', """beit.embeddings.cls_token"""), (f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""), (f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""), (f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ("""mask_token""", """beit.embeddings.mask_token"""), ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) else: # layernorm + classification head rename_keys.extend( [ ("""fc_norm.weight""", """beit.pooler.layernorm.weight"""), ("""fc_norm.bias""", """beit.pooler.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any: """simple docstring""" for i in range(config.num_hidden_layers ): A : Dict = """backbone.""" if is_semantic else """""" # queries, keys and values A : Union[str, Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' ) A : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' ) A : Optional[int] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' ) A : int = in_proj_weight[ : config.hidden_size, : ] A : Any = q_bias A : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A : Tuple = in_proj_weight[ -config.hidden_size :, : ] A : Union[str, Any] = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained A : str = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' ) A : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' ) A : Dict = gamma_a A : Dict = gamma_a def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: """simple docstring""" A : List[str] = dct.pop(_lowerCAmelCase ) A : Optional[Any] = val def __UpperCamelCase ( ) -> List[str]: """simple docstring""" A : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" A : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> str: """simple docstring""" A : Dict = False if """rvlcdip""" in checkpoint_url else True A : Union[str, Any] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: A : Dict = 1024 A : List[Any] = 4096 A : int = 24 A : int = 16 # labels if "rvlcdip" in checkpoint_url: A : List[Any] = 16 A : List[Any] = """huggingface/label-files""" A : int = """rvlcdip-id2label.json""" A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) A : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} A : int = idalabel A : Union[str, Any] = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys A : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""] A : str = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase ) # load HuggingFace model A : Any = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase ) model.eval() model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image A : Any = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase ) A : int = prepare_img() A : Tuple = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ) A : str = encoding["""pixel_values"""] A : Tuple = model(_lowerCAmelCase ) A : Optional[int] = outputs.logits # verify logits A : Tuple = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected" Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowerCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_lowerCAmelCase ) if push_to_hub: if has_lm_head: A : Any = """dit-base""" if """base""" in checkpoint_url else """dit-large""" else: A : List[Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip""" image_processor.push_to_hub( repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , ) model.push_to_hub( repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_:Optional[int] = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""", type=str, help="""URL to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", ) SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
662
0
'''simple docstring''' _UpperCAmelCase : Tuple = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/''' def UpperCamelCase ( lowercase_ : bytes ) -> bytes: '''simple docstring''' if not isinstance(lowercase_ , lowercase_ ): lowercase =f'a bytes-like object is required, not \'{data.__class__.__name__}\'' raise TypeError(lowercase_ ) lowercase =''''''.join(bin(lowercase_ )[2:].zfill(8 ) for byte in data ) lowercase =len(lowercase_ ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase =b'''=''' * ((6 - len(lowercase_ ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(lowercase_ ) % 6) else: lowercase =b'''''' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(lowercase_ ) , 6 ) ).encode() + padding ) def UpperCamelCase ( lowercase_ : str ) -> bytes: '''simple docstring''' if not isinstance(lowercase_ , lowercase_ ) and not isinstance(lowercase_ , lowercase_ ): lowercase =( '''argument should be a bytes-like object or ASCII string, ''' f'not \'{encoded_data.__class__.__name__}\'' ) raise TypeError(lowercase_ ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(lowercase_ , lowercase_ ): try: lowercase =encoded_data.decode('''utf-8''' ) except UnicodeDecodeError: raise ValueError('''base64 encoded data should only contain ASCII characters''' ) lowercase =encoded_data.count('''=''' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(lowercase_ ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase =encoded_data[:-padding] lowercase =''''''.join( bin(B64_CHARSET.index(lowercase_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase =''''''.join( bin(B64_CHARSET.index(lowercase_ ) )[2:].zfill(6 ) for char in encoded_data ) lowercase =[ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(lowercase_ ) , 8 ) ] return bytes(lowercase_ ) if __name__ == "__main__": import doctest doctest.testmod()
72
import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ): warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""", lowerCamelCase__, ) super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
662
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a_ : int = { 'configuration_rag': ['RagConfig'], 'retrieval_rag': ['RagRetriever'], 'tokenization_rag': ['RagTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[Any] = [ 'RagModel', 'RagPreTrainedModel', 'RagSequenceForGeneration', 'RagTokenForGeneration', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Tuple = [ 'TFRagModel', 'TFRagPreTrainedModel', 'TFRagSequenceForGeneration', 'TFRagTokenForGeneration', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys a_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
73
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__( lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, ) A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths} A : str = Text( cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, ) def _lowerCAmelCase ( self ): # Build iterable dataset if self.streaming: A : int = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A : List[str] = None A : Dict = None A : Tuple = None A : Tuple = None self.builder.download_and_prepare( download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, ) A : List[str] = self.builder.as_dataset( split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory ) return dataset
662
0
import argparse import os import re import packaging.version lowercase_ = """examples/""" lowercase_ = { """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""), """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } lowercase_ = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } lowercase_ = """README.md""" def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : List[Any] = f.read() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = REPLACE_PATTERNS[pattern] __SCREAMING_SNAKE_CASE : List[Any] = replace.replace('''VERSION''' , snake_case ) __SCREAMING_SNAKE_CASE : Optional[Any] = re_pattern.sub(snake_case , snake_case ) with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(snake_case ) def a__ ( snake_case ): """simple docstring""" for folder, directories, fnames in os.walk(snake_case ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(snake_case , snake_case ) , snake_case , pattern='''examples''' ) def a__ ( snake_case , snake_case=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(snake_case , snake_case , snake_case ) if not patch: update_version_in_examples(snake_case ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = '''🤗 Transformers currently provides the following architectures''' __SCREAMING_SNAKE_CASE : Union[str, Any] = '''1. Want to contribute a new model?''' with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : Dict = f.readlines() # Find the start of the list. __SCREAMING_SNAKE_CASE : Any = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __SCREAMING_SNAKE_CASE : Tuple = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): __SCREAMING_SNAKE_CASE : int = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case ) def a__ ( ): """simple docstring""" with open(REPLACE_FILES['''init'''] , '''r''' ) as f: __SCREAMING_SNAKE_CASE : Union[str, Any] = f.read() __SCREAMING_SNAKE_CASE : Union[str, Any] = REPLACE_PATTERNS['''init'''][0].search(snake_case ).groups()[0] return packaging.version.parse(snake_case ) def a__ ( snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: __SCREAMING_SNAKE_CASE : List[Any] = default_version.base_version elif patch: __SCREAMING_SNAKE_CASE : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: __SCREAMING_SNAKE_CASE : List[Any] = F'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. __SCREAMING_SNAKE_CASE : Any = input(F'''Which version are you releasing? [{default_version}]''' ) if len(snake_case ) == 0: __SCREAMING_SNAKE_CASE : Optional[Any] = default_version print(F'''Updating version to {version}.''' ) global_version_update(snake_case , patch=snake_case ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = get_version() __SCREAMING_SNAKE_CASE : Optional[Any] = F'''{current_version.major}.{current_version.minor + 1}.0.dev0''' __SCREAMING_SNAKE_CASE : Optional[Any] = current_version.base_version # Check with the user we got that right. __SCREAMING_SNAKE_CASE : Dict = input(F'''Which version are we developing now? [{dev_version}]''' ) if len(snake_case ) == 0: __SCREAMING_SNAKE_CASE : Optional[Any] = dev_version print(F'''Updating version to {version}.''' ) global_version_update(snake_case ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") lowercase_ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
74
from typing import TYPE_CHECKING from ....utils import _LazyModule SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]} if TYPE_CHECKING: from .tokenization_tapex import TapexTokenizer else: import sys SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
662
0
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ = { '''configuration_informer''': [ '''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InformerForPrediction''', '''InformerModel''', '''InformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
75
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int: """simple docstring""" A , A : str = 1, 1 A : List[Any] = [] for i in range(1 , n + 1 ): A : Optional[int] = prev_numerator + 2 * prev_denominator A : Any = prev_numerator + prev_denominator if len(str(_lowerCAmelCase ) ) > len(str(_lowerCAmelCase ) ): result.append(_lowerCAmelCase ) A : int = numerator A : int = denominator return len(_lowerCAmelCase ) if __name__ == "__main__": print(F"""{solution() = }""")
662
0
"""simple docstring""" from __future__ import annotations a_ = [True] * 1_0_0_0_0_0_1 a_ = 2 while i * i <= 1_0_0_0_0_0_0: if seive[i]: for j in range(i * i, 1_0_0_0_0_0_1, i): a_ = False i += 1 def __UpperCAmelCase ( __UpperCamelCase ): return seive[n] def __UpperCAmelCase ( __UpperCamelCase ): return any(digit in '''02468''' for digit in str(__UpperCamelCase ) ) def __UpperCAmelCase ( __UpperCamelCase = 1_00_00_00 ): __lowercase : int = [2] # result already includes the number 2. for num in range(3 , limit + 1 , 2 ): if is_prime(__UpperCamelCase ) and not contains_an_even_digit(__UpperCamelCase ): __lowercase : Dict = str(__UpperCamelCase ) __lowercase : Dict = [int(str_num[j:] + str_num[:j] ) for j in range(len(__UpperCamelCase ) )] if all(is_prime(__UpperCamelCase ) for i in list_nums ): result.append(__UpperCamelCase ) return result def __UpperCAmelCase ( ): return len(find_circular_primes() ) if __name__ == "__main__": print(F"{len(find_circular_primes()) = }")
76
import re def __UpperCamelCase ( _lowerCAmelCase ) -> str: """simple docstring""" if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ): raise ValueError("""Invalid Strand""" ) return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) ) if __name__ == "__main__": import doctest doctest.testmod()
662
0
"""simple docstring""" from __future__ import annotations def _UpperCamelCase ( UpperCamelCase ) -> list[int]: """simple docstring""" if len(UpperCamelCase ) == 0: return array __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = min(UpperCamelCase ), max(UpperCamelCase ) # Compute the variables __UpperCAmelCase : List[Any] = _max - _min + 1 __UpperCAmelCase , __UpperCAmelCase : List[str] = [0] * holes_range, [0] * holes_range # Make the sorting. for i in array: __UpperCAmelCase : List[str] = i - _min __UpperCAmelCase : Optional[Any] = i holes_repeat[index] += 1 # Makes the array back by replacing the numbers. __UpperCAmelCase : str = 0 for i in range(UpperCamelCase ): while holes_repeat[i] > 0: __UpperCAmelCase : str = holes[i] index += 1 holes_repeat[i] -= 1 # Returns the sorted array. return array if __name__ == "__main__": import doctest doctest.testmod() A = input("""Enter numbers separated by comma:\n""") A = [int(x) for x in user_input.split(""",""")] print(pigeon_sort(unsorted))
77
from __future__ import annotations SCREAMING_SNAKE_CASE_:Tuple = """#""" class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self ): A : dict = {} def _lowerCAmelCase ( self, lowerCamelCase__ ): A : List[Any] = self._trie for char in text: if char not in trie: A : str = {} A : str = trie[char] A : Optional[int] = True def _lowerCAmelCase ( self, lowerCamelCase__ ): A : Dict = self._trie for char in prefix: if char in trie: A : Optional[Any] = trie[char] else: return [] return self._elements(lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__ ): A : int = [] for c, v in d.items(): A : List[Any] = [""" """] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )] result.extend(lowerCamelCase__ ) return tuple(lowerCamelCase__ ) SCREAMING_SNAKE_CASE_:Any = Trie() SCREAMING_SNAKE_CASE_:Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""") for word in words: trie.insert_word(word) def __UpperCamelCase ( _lowerCAmelCase ) -> tuple: """simple docstring""" A : List[str] = trie.find_word(_lowerCAmelCase ) return tuple(string + word for word in suffixes ) def __UpperCamelCase ( ) -> None: """simple docstring""" print(autocomplete_using_trie("""de""" ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
662
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> int: '''simple docstring''' UpperCAmelCase_ = "huggingface/label-files" UpperCAmelCase_ = "imagenet-1k-id2label.json" UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()} UpperCAmelCase_ = {v: k for k, v in idalabel.items()} UpperCAmelCase_ = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" UpperCAmelCase_ = BitConfig( conv_layer=snake_case_ , num_labels=10_00 , idalabel=snake_case_ , labelaid=snake_case_ , ) return config def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' if "stem.conv" in name: UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: UpperCAmelCase_ = name.replace("blocks" , "layers" ) if "head.fc" in name: UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" ) if name.startswith("norm" ): UpperCAmelCase_ = "bit." + name if "bit" not in name and "classifier" not in name: UpperCAmelCase_ = "bit.encoder." + name return name def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : int=False ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = get_config(snake_case_ ) # load original model from timm UpperCAmelCase_ = create_model(snake_case_ , pretrained=snake_case_ ) timm_model.eval() # load state_dict of original model UpperCAmelCase_ = timm_model.state_dict() for key in state_dict.copy().keys(): UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val.squeeze() if "head" in key else val # load HuggingFace model UpperCAmelCase_ = BitForImageClassification(snake_case_ ) model.eval() model.load_state_dict(snake_case_ ) # create image processor UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=snake_case_ ) ) UpperCAmelCase_ = transform.transforms UpperCAmelCase_ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } UpperCAmelCase_ = BitImageProcessor( do_resize=snake_case_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=snake_case_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ) UpperCAmelCase_ = processor(snake_case_ , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(snake_case_ , snake_case_ ) # verify logits with torch.no_grad(): UpperCAmelCase_ = model(snake_case_ ) UpperCAmelCase_ = outputs.logits print("Logits:" , logits[0, :3] ) print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] ) UpperCAmelCase_ = timm_model(snake_case_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(snake_case_ , outputs.logits , atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(snake_case_ ) processor.save_pretrained(snake_case_ ) if push_to_hub: print(f"""Pushing model {model_name} and processor to the hub""" ) model.push_to_hub(f"""ybelkada/{model_name}""" ) processor.push_to_hub(f"""ybelkada/{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='resnetv2_50x1_bitm', type=str, help='Name of the BiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model to the hub.', ) SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
78
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> Union[str, Any]: """simple docstring""" A : Optional[int] = bnb_quantization_config.load_in_abit A : int = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) A : Any = [] # custom device map if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1: A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: A : int = get_keys_to_not_convert(_lowerCAmelCase ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_lowerCAmelCase ) A : Optional[Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: A : Dict = [] A : Tuple = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_lowerCAmelCase ) # compatibility with peft A : Union[str, Any] = load_in_abit A : Tuple = load_in_abit A : List[str] = get_parameter_device(_lowerCAmelCase ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. """ """The model should be instantiated under the `init_empty_weights` context manager.""" ) A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase ) # convert param to the right dtype A : Tuple = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_lowerCAmelCase ): param.to(_lowerCAmelCase ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): A : str = replace_with_bnb_layers( _lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase ) A : Optional[Any] = get_quantized_model_device_map( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): A : Tuple = True A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase ) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[int]: """simple docstring""" if device_map is None: if torch.cuda.is_available(): A : Optional[int] = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) A : Tuple = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) A : Any = {} A : List[str] = special_dtypes A : Any = no_split_module_classes A : Union[str, Any] = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": A : Tuple = get_balanced_memory( _lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , ) A : int = max_memory A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ): # check if don't have any quantized module on the cpu A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules A : Optional[int] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]: """simple docstring""" if modules_to_not_convert is None: A : Optional[Any] = [] A , A : Dict = _replace_with_bnb_layers( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> int: """simple docstring""" A : Optional[int] = False for name, module in model.named_children(): if current_key_name is None: A : int = [] current_key_name.append(_lowerCAmelCase ) if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` A : Dict = """.""".join(_lowerCAmelCase ) A : Optional[Any] = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: A : Dict = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: A : Optional[Any] = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: A : Dict = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) A : Any = module.weight.data if module.bias is not None: A : Any = module.bias.data bnb_module.requires_grad_(_lowerCAmelCase ) setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) A : Dict = True if len(list(module.children() ) ) > 0: A , A : Dict = _replace_with_bnb_layers( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) A : Union[str, Any] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def __UpperCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: """simple docstring""" with init_empty_weights(): A : Tuple = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` A : Optional[int] = find_tied_parameters(_lowerCAmelCase ) # For compatibility with Accelerate < 0.18 if isinstance(_lowerCAmelCase , _lowerCAmelCase ): A : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A : Optional[int] = sum(_lowerCAmelCase , [] ) A : Tuple = len(_lowerCAmelCase ) > 0 # Check if it is a base model A : List[str] = False if hasattr(_lowerCAmelCase , """base_model_prefix""" ): A : Optional[Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A : str = list(model.named_children() ) A : Tuple = [list_modules[-1][0]] # add last module together with tied weights A : int = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) A : Optional[Any] = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase ) # remove ".weight" from the keys A : Union[str, Any] = [""".weight""", """.bias"""] A : Optional[int] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A : List[str] = name.replace(_lowerCAmelCase , """""" ) filtered_module_names.append(_lowerCAmelCase ) return filtered_module_names def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]: """simple docstring""" for m in model.modules(): if isinstance(_lowerCAmelCase , bnb.nn.Linearabit ): return True return False def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]: """simple docstring""" return next(parameter.parameters() ).device def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase ) A : Tuple = param_name A : Union[str, Any] = model if "." in tensor_name: A : int = tensor_name.split(""".""" ) for split in splits[:-1]: A : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) A : Optional[Any] = new_module A : List[str] = splits[-1] # offload weights A : Optional[int] = False offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase , ) else: offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase ) offload_weight(_lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase ) set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , """meta""" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
662
0
import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False) parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""") parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""") SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args() SCREAMING_SNAKE_CASE__ : Optional[int] = """cpu""" SCREAMING_SNAKE_CASE__ : List[str] = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings""" SCREAMING_SNAKE_CASE__ : List[str] = """path-to-your-trained-model""" SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: SCREAMING_SNAKE_CASE__ : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) SCREAMING_SNAKE_CASE__ : List[str] = pipe.to(device) # to channels last SCREAMING_SNAKE_CASE__ : Optional[int] = pipe.unet.to(memory_format=torch.channels_last) SCREAMING_SNAKE_CASE__ : Dict = pipe.vae.to(memory_format=torch.channels_last) SCREAMING_SNAKE_CASE__ : str = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: SCREAMING_SNAKE_CASE__ : str = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.randn(2, 4, 64, 64) SCREAMING_SNAKE_CASE__ : List[Any] = torch.rand(1) * 9_99 SCREAMING_SNAKE_CASE__ : List[Any] = torch.randn(2, 77, 7_68) SCREAMING_SNAKE_CASE__ : List[Any] = (sample, timestep, encoder_hidden_status) try: SCREAMING_SNAKE_CASE__ : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: SCREAMING_SNAKE_CASE__ : str = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) SCREAMING_SNAKE_CASE__ : Tuple = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) SCREAMING_SNAKE_CASE__ : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: SCREAMING_SNAKE_CASE__ : Any = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute SCREAMING_SNAKE_CASE__ : int = 6_66 SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device).manual_seed(seed) SCREAMING_SNAKE_CASE__ : Any = {"""generator""": generator} if args.steps is not None: SCREAMING_SNAKE_CASE__ : List[Any] = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): SCREAMING_SNAKE_CASE__ : str = pipe(prompt, **generate_kwargs).images[0] # save image image.save("""generated.png""")
79
from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def __UpperCamelCase ( ) -> Dict: """simple docstring""" A : Tuple = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" ) A : Dict = parser.add_subparsers(help="""transformers-cli command helpers""" ) # Register commands ConvertCommand.register_subcommand(_lowerCAmelCase ) DownloadCommand.register_subcommand(_lowerCAmelCase ) EnvironmentCommand.register_subcommand(_lowerCAmelCase ) RunCommand.register_subcommand(_lowerCAmelCase ) ServeCommand.register_subcommand(_lowerCAmelCase ) UserCommands.register_subcommand(_lowerCAmelCase ) AddNewModelCommand.register_subcommand(_lowerCAmelCase ) AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase ) LfsCommands.register_subcommand(_lowerCAmelCase ) PTtoTFCommand.register_subcommand(_lowerCAmelCase ) # Let's go A : Tuple = parser.parse_args() if not hasattr(_lowerCAmelCase , """func""" ): parser.print_help() exit(1 ) # Run A : Any = args.func(_lowerCAmelCase ) service.run() if __name__ == "__main__": main()
662
0
import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): __snake_case :Any = FlaxAutoencoderKL @property def _a ( self : Tuple ) -> Optional[int]: """simple docstring""" __lowercase = 4 __lowercase = 3 __lowercase = (32, 32) __lowercase = jax.random.PRNGKey(0 ) __lowercase = jax.random.uniform(_lowerCAmelCase , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def _a ( self : str ) -> Optional[int]: """simple docstring""" __lowercase = { """block_out_channels""": [32, 64], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } __lowercase = self.dummy_input return init_dict, inputs_dict
80
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE_:int = { """configuration_blenderbot""": [ """BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlenderbotConfig""", """BlenderbotOnnxConfig""", ], """tokenization_blenderbot""": ["""BlenderbotTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Optional[int] = [ """BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlenderbotForCausalLM""", """BlenderbotForConditionalGeneration""", """BlenderbotModel""", """BlenderbotPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Union[str, Any] = [ """TFBlenderbotForConditionalGeneration""", """TFBlenderbotModel""", """TFBlenderbotPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Any = [ """FlaxBlenderbotForConditionalGeneration""", """FlaxBlenderbotModel""", """FlaxBlenderbotPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
662
0
import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor _snake_case : int = logging.get_logger(__name__) class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : Optional[int] , *lowerCamelCase : List[Any] , **lowerCamelCase : int ) -> None: warnings.warn( "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use ImageGPTImageProcessor instead." , lowerCamelCase , ) super().__init__(*lowerCamelCase , **lowerCamelCase )
81
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list[int]: """simple docstring""" A : Optional[int] = int(_lowerCAmelCase ) # Initialize Result A : int = [] # Traverse through all denomination for denomination in reversed(_lowerCAmelCase ): # Find denominations while int(_lowerCAmelCase ) >= int(_lowerCAmelCase ): total_value -= int(_lowerCAmelCase ) answer.append(_lowerCAmelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": SCREAMING_SNAKE_CASE_:List[Any] = [] SCREAMING_SNAKE_CASE_:Dict = """0""" if ( input("""Do you want to enter your denominations ? (yY/n): """).strip().lower() == "y" ): SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the number of denominations you want to add: """).strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip() else: # All denominations of Indian Currency if user does not enter SCREAMING_SNAKE_CASE_:Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2_000] SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make: """).strip() if int(value) == 0 or int(value) < 0: print("""The total value cannot be zero or negative.""") else: print(F"""Following is minimal change for {value}: """) SCREAMING_SNAKE_CASE_:str = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=""" """)
662
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available lowerCamelCase = { """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = [ """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ErnieForCausalLM""", """ErnieForMaskedLM""", """ErnieForMultipleChoice""", """ErnieForNextSentencePrediction""", """ErnieForPreTraining""", """ErnieForQuestionAnswering""", """ErnieForSequenceClassification""", """ErnieForTokenClassification""", """ErnieModel""", """ErniePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
82
# This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru""" # Build # borrowed from a test SCREAMING_SNAKE_CASE_:Union[str, Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] SCREAMING_SNAKE_CASE_:Any = dict(zip(vocab, range(len(vocab)))) SCREAMING_SNAKE_CASE_:Dict = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE_:List[Any] = Path(tmpdirname) SCREAMING_SNAKE_CASE_:str = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""] SCREAMING_SNAKE_CASE_:Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""] SCREAMING_SNAKE_CASE_:Any = build_dir / VOCAB_FILES_NAMES["""merges_file"""] with open(src_vocab_file, """w""") as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, """w""") as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, """w""") as fp: fp.write("""\n""".join(merges)) SCREAMING_SNAKE_CASE_:Optional[int] = FSMTTokenizer( langs=["""en""", """ru"""], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) SCREAMING_SNAKE_CASE_:Optional[int] = FSMTConfig( langs=["""ru""", """en"""], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) SCREAMING_SNAKE_CASE_:Optional[Any] = FSMTForConditionalGeneration(config) print(F"""num of params {tiny_model.num_parameters()}""") # Test SCREAMING_SNAKE_CASE_:Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""") SCREAMING_SNAKE_CASE_:str = tiny_model(**batch) print("""test output:""", len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F"""Generated {mname_tiny}""") # Upload # transformers-cli upload tiny-wmt19-en-ru
662
0
"""simple docstring""" import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger() def snake_case_ ( A_ : int, A_ : str, A_ : LevitConfig, A_ : Path, A_ : bool = True ): '''simple docstring''' print(F'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": _lowerCamelCase : int = timm.create_model('''levit_128s''', pretrained=A_ ) else: _lowerCamelCase : Tuple = timm.create_model('''levit_128''', pretrained=A_ ) if hidden_sizes == 1_92: _lowerCamelCase : List[str] = timm.create_model('''levit_192''', pretrained=A_ ) if hidden_sizes == 2_56: _lowerCamelCase : Union[str, Any] = timm.create_model('''levit_256''', pretrained=A_ ) if hidden_sizes == 3_84: _lowerCamelCase : Union[str, Any] = timm.create_model('''levit_384''', pretrained=A_ ) from_model.eval() _lowerCamelCase : Any = LevitForImageClassificationWithTeacher(A_ ).eval() _lowerCamelCase : int = OrderedDict() _lowerCamelCase : Any = from_model.state_dict() _lowerCamelCase : List[str] = list(from_model.state_dict().keys() ) _lowerCamelCase : List[str] = list(our_model.state_dict().keys() ) print(len(A_ ), len(A_ ) ) for i in range(len(A_ ) ): _lowerCamelCase : Union[str, Any] = weights[og_keys[i]] our_model.load_state_dict(A_ ) _lowerCamelCase : Optional[int] = torch.randn((2, 3, 2_24, 2_24) ) _lowerCamelCase : Union[str, Any] = from_model(A_ ) _lowerCamelCase : Optional[Any] = our_model(A_ ).logits assert torch.allclose(A_, A_ ), "The model logits don't match the original one." _lowerCamelCase : int = name print(A_ ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) _lowerCamelCase : int = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'''Pushed {checkpoint_name}''' ) def snake_case_ ( A_ : Path, A_ : str = None, A_ : bool = True ): '''simple docstring''' _lowerCamelCase : Dict = '''imagenet-1k-id2label.json''' _lowerCamelCase : Dict = 10_00 _lowerCamelCase : Union[str, Any] = (1, num_labels) _lowerCamelCase : Tuple = '''huggingface/label-files''' _lowerCamelCase : Any = num_labels _lowerCamelCase : List[Any] = json.load(open(hf_hub_download(A_, A_, repo_type='''dataset''' ), '''r''' ) ) _lowerCamelCase : List[str] = {int(A_ ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = idalabel _lowerCamelCase : Tuple = {v: k for k, v in idalabel.items()} _lowerCamelCase : int = partial(A_, num_labels=A_, idalabel=A_, labelaid=A_ ) _lowerCamelCase : Optional[int] = { '''levit-128S''': 1_28, '''levit-128''': 1_28, '''levit-192''': 1_92, '''levit-256''': 2_56, '''levit-384''': 3_84, } _lowerCamelCase : Any = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[3_84, 5_12, 7_68], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name], A_, names_to_config[model_name], A_, A_ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name], A_, A_, A_, A_ ) return config, expected_shape if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''levit-dump-folder/''', type=Path, required=False, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
83
import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("""0.12.2"""): raise Exception("""requires fairseq >= 0.12.2""") if version.parse(fairseq.__version__) > version.parse("""2"""): raise Exception("""requires fairseq < v2""") logging.set_verbosity_info() SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_:int = """Hello, World!""" SCREAMING_SNAKE_CASE_:List[Any] = """en_XX""" def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any: """simple docstring""" A : Optional[int] = Path("""data_bin""" ) A : Optional[Any] = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowerCAmelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , ) xmod.eval() # disable dropout print(_lowerCAmelCase ) A : Any = xmod.model.encoder.sentence_encoder A : Optional[int] = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0] print("""Our X-MOD config:""" , _lowerCAmelCase ) A : int = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase ) model.eval() # Now let's copy all the weights. # Embeddings A : Any = xmod_sent_encoder.embed_tokens.weight A : int = xmod_sent_encoder.embed_positions.weight A : str = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. A : Dict = xmod_sent_encoder.layernorm_embedding.weight A : int = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer A : str = model.roberta.encoder.layer[i] A : Tuple = xmod_sent_encoder.layers[i] # self attention A : Optional[int] = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError("""Dimensions of self-attention weights do not match.""" ) A : List[str] = xmod_layer.self_attn.q_proj.weight A : Optional[int] = xmod_layer.self_attn.q_proj.bias A : List[Any] = xmod_layer.self_attn.k_proj.weight A : Union[str, Any] = xmod_layer.self_attn.k_proj.bias A : Optional[int] = xmod_layer.self_attn.v_proj.weight A : Dict = xmod_layer.self_attn.v_proj.bias # self-attention output A : Optional[Any] = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError("""Dimensions of self-attention output weights do not match.""" ) A : Optional[Any] = xmod_layer.self_attn.out_proj.weight A : Dict = xmod_layer.self_attn.out_proj.bias A : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight A : str = xmod_layer.self_attn_layer_norm.bias # intermediate A : str = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("""Dimensions of intermediate weights do not match.""" ) A : Optional[int] = xmod_layer.fca.weight A : Optional[int] = xmod_layer.fca.bias # output A : Dict = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("""Dimensions of feed-forward weights do not match.""" ) A : Union[str, Any] = xmod_layer.fca.weight A : int = xmod_layer.fca.bias A : List[str] = xmod_layer.final_layer_norm.weight A : Optional[Any] = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: A : str = xmod_layer.adapter_layer_norm.weight A : str = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError("""Lists of language adapters do not match.""" ) for lang_code, adapter in xmod_layer.adapter_modules.items(): A : Optional[int] = bert_output.adapter_modules[lang_code] A : int = xmod_layer.adapter_modules[lang_code] A : Optional[Any] = from_adapter.fca.weight A : Optional[Any] = from_adapter.fca.bias A : List[str] = from_adapter.fca.weight A : Any = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: A : Dict = xmod_sent_encoder.layer_norm.weight A : int = xmod_sent_encoder.layer_norm.bias if classification_head: A : int = xmod.model.classification_heads["""mnli"""].dense.weight A : Optional[Any] = xmod.model.classification_heads["""mnli"""].dense.bias A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.bias else: # LM Head A : Any = xmod.model.encoder.lm_head.dense.weight A : Tuple = xmod.model.encoder.lm_head.dense.bias A : Any = xmod.model.encoder.lm_head.layer_norm.weight A : List[str] = xmod.model.encoder.lm_head.layer_norm.bias A : Union[str, Any] = xmod.model.encoder.lm_head.weight A : Tuple = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. A : Optional[int] = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(_lowerCAmelCase ) A : List[str] = model(_lowerCAmelCase )[0] if classification_head: A : Dict = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowerCAmelCase ) ) else: A : Optional[Any] = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) A : str = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 A : Optional[Any] = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" ) if not success: raise Exception("""Something went wRoNg""" ) Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_:Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head.""" ) SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
662
0
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ): lowercase = [int(__SCREAMING_SNAKE_CASE ) for i in ip_va_address.split('.' ) if i.isdigit()] return len(__SCREAMING_SNAKE_CASE ) == 4 and all(0 <= int(__SCREAMING_SNAKE_CASE ) <= 254 for octet in octets ) if __name__ == "__main__": UpperCAmelCase = input().strip() UpperCAmelCase = '''valid''' if is_ip_va_address_valid(ip) else '''invalid''' print(F"""{ip} is a {valid_or_invalid} IP v4 address.""")
84
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ): A : Any = tempfile.mkdtemp() A : List[str] = BlipImageProcessor() A : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) A : str = BlipProcessor(lowerCamelCase__, lowerCamelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowerCAmelCase ( self, **lowerCamelCase__ ): return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).tokenizer def _lowerCAmelCase ( self, **lowerCamelCase__ ): return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).image_processor def _lowerCAmelCase ( self ): shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self ): A : Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )] A : Any = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs] return image_inputs def _lowerCAmelCase ( self ): A : int = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A : Any = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" ) A : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase__, padding_value=1.0 ) A : Dict = BlipProcessor.from_pretrained( self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer, lowerCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : Optional[int] = self.get_image_processor() A : str = self.get_tokenizer() A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Any = self.prepare_image_inputs() A : int = image_processor(lowerCamelCase__, return_tensors="""np""" ) A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 ) def _lowerCAmelCase ( self ): A : List[str] = self.get_image_processor() A : int = self.get_tokenizer() A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[Any] = """lower newer""" A : List[Any] = processor(text=lowerCamelCase__ ) A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key] ) def _lowerCAmelCase ( self ): A : List[Any] = self.get_image_processor() A : Dict = self.get_tokenizer() A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[int] = """lower newer""" A : Union[str, Any] = self.prepare_image_inputs() A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ ) self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(lowerCamelCase__ ): processor() def _lowerCAmelCase ( self ): A : List[Any] = self.get_image_processor() A : Dict = self.get_tokenizer() A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A : Optional[int] = processor.batch_decode(lowerCamelCase__ ) A : Dict = tokenizer.batch_decode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : Optional[int] = self.get_image_processor() A : int = self.get_tokenizer() A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Optional[int] = """lower newer""" A : List[str] = self.prepare_image_inputs() A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
662
0
def _a ( lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = [1] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = 0, 0, 0 SCREAMING_SNAKE_CASE__ : Any = ugly_nums[ia] * 2 SCREAMING_SNAKE_CASE__ : Any = ugly_nums[ia] * 3 SCREAMING_SNAKE_CASE__ : Optional[Any] = ugly_nums[ia] * 5 for _ in range(1 , lowercase__ ): SCREAMING_SNAKE_CASE__ : Optional[int] = min(lowercase__ , lowercase__ , lowercase__ ) ugly_nums.append(lowercase__ ) if next_num == next_a: ia += 1 SCREAMING_SNAKE_CASE__ : Tuple = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 SCREAMING_SNAKE_CASE__ : Dict = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 SCREAMING_SNAKE_CASE__ : str = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(F"""{ugly_numbers(200) = }""")
85
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ): return f'''gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase__ ) for s in shape] )}.npy''' def _lowerCAmelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 4, 64, 64), lowerCamelCase__=False ): A : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa A : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ ) return image def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__="CompVis/stable-diffusion-v1-4" ): A : str = jnp.bfloataa if fpaa else jnp.floataa A : Union[str, Any] = """bf16""" if fpaa else None A , A : str = FlaxUNetaDConditionModel.from_pretrained( lowerCamelCase__, subfolder="""unet""", dtype=lowerCamelCase__, revision=lowerCamelCase__ ) return model, params def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 77, 768), lowerCamelCase__=False ): A : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa A : List[str] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ): A , A : List[str] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=lowerCamelCase__ ) A : str = self.get_latents(lowerCamelCase__, fpaa=lowerCamelCase__ ) A : int = self.get_encoder_hidden_states(lowerCamelCase__, fpaa=lowerCamelCase__ ) A : Optional[Any] = model.apply( {"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample assert sample.shape == latents.shape A : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa ) A : Dict = jnp.array(lowerCamelCase__, dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ): A , A : Tuple = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=lowerCamelCase__ ) A : int = self.get_latents(lowerCamelCase__, shape=(4, 4, 96, 96), fpaa=lowerCamelCase__ ) A : Union[str, Any] = self.get_encoder_hidden_states(lowerCamelCase__, shape=(4, 77, 1024), fpaa=lowerCamelCase__ ) A : Dict = model.apply( {"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample assert sample.shape == latents.shape A : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa ) A : List[Any] = jnp.array(lowerCamelCase__, dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
662
0
from collections.abc import Callable import numpy as np def __snake_case ( __UpperCamelCase : Callable ,__UpperCamelCase : float ,__UpperCamelCase : float ,__UpperCamelCase : float ,__UpperCamelCase : float ): """simple docstring""" A_ = int(np.ceil((x_end - xa) / step_size ) ) A_ = np.zeros((n + 1,) ) A_ = ya A_ = xa for k in range(__UpperCamelCase ): A_ = y[k] + step_size * ode_func(__UpperCamelCase ,y[k] ) A_ = y[k] + ( (step_size / 2) * (ode_func(__UpperCamelCase ,y[k] ) + ode_func(x + step_size ,__UpperCamelCase )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
86
from typing import Any import numpy as np def __UpperCamelCase ( _lowerCAmelCase ) -> bool: """simple docstring""" return np.array_equal(_lowerCAmelCase , matrix.conjugate().T ) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: """simple docstring""" A : Any = v.conjugate().T A : List[Any] = v_star.dot(_lowerCAmelCase ) assert isinstance(_lowerCAmelCase , np.ndarray ) return (v_star_dot.dot(_lowerCAmelCase )) / (v_star.dot(_lowerCAmelCase )) def __UpperCamelCase ( ) -> None: """simple docstring""" A : Any = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) A : str = np.array([[1], [2], [3]] ) assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.''' print(rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) ) A : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.''' assert rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
662
0
import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput _lowerCamelCase : Union[str, Any] = """scheduler_config.json""" class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = 1 UpperCAmelCase__ = 2 UpperCAmelCase__ = 3 UpperCAmelCase__ = 4 UpperCAmelCase__ = 5 @dataclass class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = 42 class UpperCamelCase_ : '''simple docstring''' UpperCAmelCase__ = SCHEDULER_CONFIG_NAME UpperCAmelCase__ = ['''dtype'''] UpperCAmelCase__ = [] UpperCAmelCase__ = True @classmethod def SCREAMING_SNAKE_CASE ( cls : List[Any] , UpperCAmelCase__ : Dict[str, Any] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : int=False , **UpperCAmelCase__ : Union[str, Any] , ) ->Union[str, Any]: '''simple docstring''' A__ , A__ = cls.load_config( pretrained_model_name_or_path=UpperCAmelCase__ , subfolder=UpperCAmelCase__ , return_unused_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ , ) A__ , A__ = cls.from_config(UpperCAmelCase__ , return_unused_kwargs=UpperCAmelCase__ , **UpperCAmelCase__) if hasattr(UpperCAmelCase__ , '''create_state''') and getattr(UpperCAmelCase__ , '''has_state''' , UpperCAmelCase__): A__ = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Union[str, os.PathLike] , UpperCAmelCase__ : bool = False , **UpperCAmelCase__ : Optional[Any]) ->List[Any]: '''simple docstring''' self.save_config(save_directory=UpperCAmelCase__ , push_to_hub=UpperCAmelCase__ , **UpperCAmelCase__) @property def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' return self._get_compatibles() @classmethod def SCREAMING_SNAKE_CASE ( cls : int) ->Dict: '''simple docstring''' A__ = list(set([cls.__name__] + cls._compatibles)) A__ = importlib.import_module(__name__.split('''.''')[0]) A__ = [ getattr(UpperCAmelCase__ , UpperCAmelCase__) for c in compatible_classes_str if hasattr(UpperCAmelCase__ , UpperCAmelCase__) ] return compatible_classes def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> jnp.ndarray: """simple docstring""" assert len(lowercase_ ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowercase_ ) - x.ndim) ) , lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=0.9_99 , lowercase_=jnp.floataa ) -> jnp.ndarray: """simple docstring""" def alpha_bar(lowercase_ ): return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2 A__ = [] for i in range(lowercase_ ): A__ = i / num_diffusion_timesteps A__ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(lowercase_ ) / alpha_bar(lowercase_ ) , lowercase_ ) ) return jnp.array(lowercase_ , dtype=lowercase_ ) @flax.struct.dataclass class UpperCamelCase_ : '''simple docstring''' UpperCAmelCase__ = 42 UpperCAmelCase__ = 42 UpperCAmelCase__ = 42 @classmethod def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , UpperCAmelCase__ : List[str]) ->Any: '''simple docstring''' A__ = scheduler.config if config.trained_betas is not None: A__ = jnp.asarray(config.trained_betas , dtype=scheduler.dtype) elif config.beta_schedule == "linear": A__ = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. A__ = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule A__ = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype) else: raise NotImplementedError( f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""") A__ = 1.0 - betas A__ = jnp.cumprod(UpperCAmelCase__ , axis=0) return cls( alphas=UpperCAmelCase__ , betas=UpperCAmelCase__ , alphas_cumprod=UpperCAmelCase__ , ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]: """simple docstring""" A__ = state.alphas_cumprod A__ = alphas_cumprod[timesteps] ** 0.5 A__ = sqrt_alpha_prod.flatten() A__ = broadcast_to_shape_from_left(lowercase_ , original_samples.shape ) A__ = (1 - alphas_cumprod[timesteps]) ** 0.5 A__ = sqrt_one_minus_alpha_prod.flatten() A__ = broadcast_to_shape_from_left(lowercase_ , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]: """simple docstring""" A__ , A__ = get_sqrt_alpha_prod(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) A__ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]: """simple docstring""" A__ , A__ = get_sqrt_alpha_prod(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) A__ = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
87
from __future__ import annotations import numpy as np def __UpperCamelCase ( _lowerCAmelCase ) -> tuple[np.ndarray, np.ndarray]: """simple docstring""" A , A : int = np.shape(_lowerCAmelCase ) if rows != columns: A : Union[str, Any] = ( """'table' has to be of square shaped array but got a """ f'''{rows}x{columns} array:\n{table}''' ) raise ValueError(_lowerCAmelCase ) A : Union[str, Any] = np.zeros((rows, columns) ) A : Dict = np.zeros((rows, columns) ) for i in range(_lowerCAmelCase ): for j in range(_lowerCAmelCase ): A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) ) if upper[j][j] == 0: raise ArithmeticError("""No LU decomposition exists""" ) A : Any = (table[i][j] - total) / upper[j][j] A : Union[str, Any] = 1 for j in range(_lowerCAmelCase , _lowerCAmelCase ): A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) ) A : str = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
662
0
"""simple docstring""" import torch from transformers import AutoModel class lowercase__ ( torch.nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased") -> str: super(SCREAMING_SNAKE_CASE , self).__init__() _lowerCamelCase : Union[str, Any] = AutoModel.from_pretrained(SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = torch.nn.CosineSimilarity(3 , 1e-0_8) _lowerCamelCase : Optional[int] = torch.nn.Softmax(dim=1) def UpperCamelCase_ ( self , **SCREAMING_SNAKE_CASE) -> str: return self.bert(**SCREAMING_SNAKE_CASE).last_hidden_state def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Optional[Any]: return token_embeddings.sum(2 , keepdim=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1) -> Union[str, Any]: return self.softmax(T * self.cos(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : str = W_supports["""sizes"""].tolist() _lowerCamelCase : int = W_supports["""start_token_id"""].item() _lowerCamelCase : str = W_supports["""end_token_id"""].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] _lowerCamelCase : List[str] = self.BERT(**SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = self.BERT(**SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = None _lowerCamelCase : List[Any] = None _lowerCamelCase : Any = W_supports["""input_ids"""] == start_token_id _lowerCamelCase : Any = W_supports["""input_ids"""] == end_token_id for i, size in enumerate(SCREAMING_SNAKE_CASE): if i == 0: _lowerCamelCase : List[str] = 0 else: _lowerCamelCase : Dict = support_sizes[i - 1] _lowerCamelCase : Union[str, Any] = S[s : s + size][start_token_masks[s : s + size]] _lowerCamelCase : Any = S[s : s + size][end_token_masks[s : s + size]] _lowerCamelCase : Any = torch.matmul(q[i] , s_start.T).sum(1).softmax(0) _lowerCamelCase : List[str] = torch.matmul(q[i] , s_end.T).sum(1).softmax(0) if p_starts is not None: _lowerCamelCase : str = torch.vstack((p_starts, p_start)) _lowerCamelCase : Optional[Any] = torch.vstack((p_ends, p_end)) else: _lowerCamelCase : Optional[Any] = p_start _lowerCamelCase : int = p_end return p_starts, p_ends
88
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple[int, int]: """simple docstring""" def constraint_to_multiple_of(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0 , _lowerCAmelCase=None ): A : Optional[int] = round(val / multiple ) * multiple if max_val is not None and x > max_val: A : Optional[Any] = math.floor(val / multiple ) * multiple if x < min_val: A : Any = math.ceil(val / multiple ) * multiple return x A : Optional[Any] = (output_size, output_size) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else output_size A , A : List[Any] = get_image_size(_lowerCAmelCase ) A , A : List[Any] = output_size # determine new height and width A : Optional[int] = output_height / input_height A : Optional[Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width A : Any = scale_width else: # fit height A : int = scale_height A : Any = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCAmelCase ) A : int = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCAmelCase ) return (new_height, new_width) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : Optional[int] = ["pixel_values"] def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) A : int = size if size is not None else {"""height""": 384, """width""": 384} A : str = get_size_dict(lowerCamelCase__ ) A : Optional[Any] = do_resize A : Optional[int] = size A : Union[str, Any] = keep_aspect_ratio A : int = ensure_multiple_of A : Dict = resample A : Optional[Any] = do_rescale A : Any = rescale_factor A : str = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ): A : Dict = get_size_dict(lowerCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) A : Optional[Any] = get_resize_output_image_size( lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, ) return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ): A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize A : str = size if size is not None else self.size A : str = get_size_dict(lowerCamelCase__ ) A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of A : Tuple = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : int = rescale_factor if rescale_factor is not None else self.rescale_factor A : int = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : Optional[int] = image_std if image_std is not None else self.image_std A : Any = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. A : str = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images] if do_rescale: A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images] if do_normalize: A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images] A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images] A : Optional[int] = {"""pixel_values""": images} return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ): A : Any = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCamelCase__ ) != len(lowerCamelCase__ ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(lowerCamelCase__ ): A : int = target_sizes.numpy() A : Union[str, Any] = [] for idx in range(len(lowerCamelCase__ ) ): A : int = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ ) A : Tuple = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCamelCase__ ) else: A : List[str] = logits.argmax(dim=1 ) A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
662
0
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Any = {"tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE : Tuple = { "tokenizer_file": { "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json", "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json", }, } class _lowerCamelCase( _a ): lowercase_ : Optional[Any] = VOCAB_FILES_NAMES lowercase_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP lowercase_ : str = ["""input_ids""", """attention_mask"""] lowercase_ : Tuple = None def __init__( self, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase="<unk>", lowerCamelCase="<s>", lowerCamelCase="</s>", lowerCamelCase="<pad>", lowerCamelCase=False, lowerCamelCase=False, **lowerCamelCase, ) -> Optional[Any]: """simple docstring""" super().__init__( lowerCamelCase, lowerCamelCase, tokenizer_file=lowerCamelCase, unk_token=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, pad_token=lowerCamelCase, add_prefix_space=lowerCamelCase, clean_up_tokenization_spaces=lowerCamelCase, **lowerCamelCase, ) _lowercase : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space', lowerCamelCase) != add_prefix_space: _lowercase : Dict = getattr(lowerCamelCase, pre_tok_state.pop('type')) _lowercase : Optional[int] = add_prefix_space _lowercase : List[Any] = pre_tok_class(**lowerCamelCase) _lowercase : Tuple = add_prefix_space def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> BatchEncoding: """simple docstring""" _lowercase : Dict = kwargs.get('is_split_into_words', lowerCamelCase) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' ' pretokenized inputs.') return super()._batch_encode_plus(*lowerCamelCase, **lowerCamelCase) def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> BatchEncoding: """simple docstring""" _lowercase : Dict = kwargs.get('is_split_into_words', lowerCamelCase) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' ' pretokenized inputs.') return super()._encode_plus(*lowerCamelCase, **lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Tuple[str]: """simple docstring""" _lowercase : Union[str, Any] = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase) return tuple(lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase) -> List[int]: """simple docstring""" _lowercase : Dict = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowerCamelCase, add_special_tokens=lowerCamelCase) + [self.eos_token_id]) if len(lowerCamelCase) > self.model_max_length: _lowercase : Optional[Any] = input_ids[-self.model_max_length :] return input_ids
89
class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self, lowerCamelCase__ ): # we need a list not a string, so do something to change the type A : List[Any] = arr.split(""",""" ) def _lowerCAmelCase ( self ): A : int = [int(self.array[0] )] * len(self.array ) A : Optional[Any] = [int(self.array[0] )] * len(self.array ) for i in range(1, len(self.array ) ): A : Union[str, Any] = max( int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) ) A : Dict = max(sum_value[i], rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": SCREAMING_SNAKE_CASE_:int = input("""please input some numbers:""") SCREAMING_SNAKE_CASE_:Dict = SubArray(whole_array) SCREAMING_SNAKE_CASE_:Optional[int] = array.solve_sub_array() print(("""the results is:""", re))
662
0
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __UpperCAmelCase = logging.get_logger(__name__) class a__ ( a__ ): '''simple docstring''' lowercase__ : Any = "upernet" def __init__( self , lowerCamelCase_=None , lowerCamelCase_=5_12 , lowerCamelCase_=0.02 , lowerCamelCase_=[1, 2, 3, 6] , lowerCamelCase_=True , lowerCamelCase_=0.4 , lowerCamelCase_=3_84 , lowerCamelCase_=2_56 , lowerCamelCase_=1 , lowerCamelCase_=False , lowerCamelCase_=2_55 , **lowerCamelCase_ , ) -> Union[str, Any]: super().__init__(**lowerCamelCase_ ) if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowerCAmelCase__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) elif isinstance(lowerCamelCase_ , lowerCamelCase_ ): lowerCAmelCase__ = backbone_config.get('''model_type''' ) lowerCAmelCase__ = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase__ = config_class.from_dict(lowerCamelCase_ ) lowerCAmelCase__ = backbone_config lowerCAmelCase__ = hidden_size lowerCAmelCase__ = initializer_range lowerCAmelCase__ = pool_scales lowerCAmelCase__ = use_auxiliary_head lowerCAmelCase__ = auxiliary_loss_weight lowerCAmelCase__ = auxiliary_in_channels lowerCAmelCase__ = auxiliary_channels lowerCAmelCase__ = auxiliary_num_convs lowerCAmelCase__ = auxiliary_concat_input lowerCAmelCase__ = loss_ignore_index def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = copy.deepcopy(self.__dict__ ) lowerCAmelCase__ = self.backbone_config.to_dict() lowerCAmelCase__ = self.__class__.model_type return output
90
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_:List[Any] = { """google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : Optional[Any] = "bit" __lowerCamelCase : Union[str, Any] = ["preactivation", "bottleneck"] __lowerCamelCase : Union[str, Any] = ["SAME", "VALID"] def __init__( self, lowerCamelCase__=3, lowerCamelCase__=64, lowerCamelCase__=[256, 512, 1024, 2048], lowerCamelCase__=[3, 4, 6, 3], lowerCamelCase__="preactivation", lowerCamelCase__="relu", lowerCamelCase__=None, lowerCamelCase__=32, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=1, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) if layer_type not in self.layer_types: raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' ) if global_padding is not None: if global_padding.upper() in self.supported_padding: A : List[Any] = global_padding.upper() else: raise ValueError(f'''Padding strategy {global_padding} not supported''' ) A : Dict = num_channels A : List[Any] = embedding_size A : Optional[Any] = hidden_sizes A : str = depths A : str = layer_type A : Union[str, Any] = hidden_act A : Any = global_padding A : Optional[int] = num_groups A : Dict = drop_path_rate A : List[Any] = embedding_dynamic_padding A : List[Any] = output_stride A : Union[str, Any] = width_factor A : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )] A , A : Any = get_aligned_output_features_output_indices( out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
662
0
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[int] = BlenderbotSmallTokenizer _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: super().setUp() A = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__'] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', ''] A = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'} A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,**A_ : Union[str, Any] ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ) -> List[Any]: A = 'adapt act apte' A = 'adapt act apte' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: A = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) A = 'adapt act apte' A = ['adapt', 'act', 'ap@@', 'te'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) assert tok('sam' ).input_ids == [1384] A = 'I am a small frog.' A = tok([src_text] ,padding=A_ ,truncation=A_ )['input_ids'] A = tok.batch_decode(A_ ,skip_special_tokens=A_ ,clean_up_tokenization_spaces=A_ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) A = 'I am a small frog .' A = '.' A = tok(A_ )['input_ids'] A = tok(A_ )['input_ids'] assert encoded[-1] == encoded_dot[0]
91
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=50, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=None, ): A : List[str] = parent A : List[str] = batch_size A : Optional[int] = seq_length A : Optional[int] = is_training A : Tuple = use_input_mask A : Optional[Any] = vocab_size A : str = hidden_size A : Any = num_hidden_layers A : List[Any] = num_attention_heads A : Optional[int] = intermediate_size A : int = hidden_act A : Dict = hidden_dropout_prob A : Optional[Any] = attention_probs_dropout_prob A : List[Any] = max_position_embeddings A : int = initializer_range A : Tuple = use_labels A : List[str] = scope def _lowerCAmelCase ( self ): A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) A : int = None if self.use_input_mask: A : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) A : List[Any] = self.get_config() return config, input_ids, input_mask, token_labels def _lowerCAmelCase ( self ): return BertGenerationConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, ) def _lowerCAmelCase ( self ): ( ( A ) , ( A ) , ( A ) , ( A ) , ) : List[Any] = self.prepare_config_and_inputs() A : Any = True A : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ): A : str = BertGenerationEncoder(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ ) A : List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ): A : List[str] = True A : Union[str, Any] = BertGenerationEncoder(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A : Any = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, ) A : Optional[Any] = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ): A : Union[str, Any] = True A : Optional[int] = True A : Optional[int] = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval() # first forward pass A : int = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__, ) A : List[str] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size ) A : int = ids_tensor((self.batch_size, 3), vocab_size=2 ) # append to next input_ids and A : List[str] = torch.cat([input_ids, next_tokens], dim=-1 ) A : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1 ) A : List[str] = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0] A : Any = model( lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0] # select random slice A : Any = ids_tensor((1,), output_from_past.shape[-1] ).item() A : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() A : Dict = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, ): A : Optional[int] = BertGenerationDecoder(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A : List[str] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCAmelCase ( self ): A , A , A , A : str = self.prepare_config_and_inputs() A : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () __lowerCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else () __lowerCamelCase : List[Any] = ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() else {} ) def _lowerCAmelCase ( self ): A : Any = BertGenerationEncoderTester(self ) A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 ) def _lowerCAmelCase ( self ): self.config_tester.run_common_tests() def _lowerCAmelCase ( self ): A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _lowerCAmelCase ( self ): A , A , A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs() A : Any = """bert""" self.model_tester.create_and_check_model(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ ) def _lowerCAmelCase ( self ): # This regression test was failing with PyTorch < 1.3 ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() A : int = None self.model_tester.create_and_check_model_as_decoder( lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ) def _lowerCAmelCase ( self ): A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ ) @slow def _lowerCAmelCase ( self ): A : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) self.assertIsNotNone(lowerCamelCase__ ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): A : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) A : Optional[int] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): A : Union[str, Any] = model(lowerCamelCase__ )[0] A : List[Any] = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape, lowerCamelCase__ ) A : Tuple = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): A : Optional[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) A : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): A : Dict = model(lowerCamelCase__ )[0] A : List[str] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape, lowerCamelCase__ ) A : Optional[Any] = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
662
0
'''simple docstring''' import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple=13 , UpperCAmelCase__ : Optional[int]=30 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : Union[str, Any]=5 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : str=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : int=10 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : List[str]=2 , ): '''simple docstring''' lowercase : Optional[Any] =parent lowercase : List[str] =batch_size lowercase : Tuple =image_size lowercase : str =patch_size lowercase : Optional[Any] =num_channels lowercase : List[str] =is_training lowercase : Any =use_labels lowercase : str =hidden_size lowercase : str =num_hidden_layers lowercase : List[Any] =num_attention_heads lowercase : List[Any] =intermediate_size lowercase : Any =hidden_act lowercase : Optional[Any] =hidden_dropout_prob lowercase : Optional[int] =attention_probs_dropout_prob lowercase : Optional[int] =type_sequence_label_size lowercase : Optional[Any] =initializer_range lowercase : Dict =scope lowercase : Optional[int] =encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowercase : Dict =(image_size // patch_size) ** 2 lowercase : List[Any] =num_patches + 2 def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : List[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : Optional[Any] =None if self.use_labels: lowercase : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : Any =self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : int ): '''simple docstring''' return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] ): '''simple docstring''' lowercase : str =DeiTModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[Any] =model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Dict =DeiTForMaskedImageModeling(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[int] =model(UpperCAmelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowercase : Dict =1 lowercase : Dict =DeiTForMaskedImageModeling(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[Any] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase : Optional[Any] =model(UpperCAmelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ): '''simple docstring''' lowercase : Dict =self.type_sequence_label_size lowercase : str =DeiTForImageClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : str =model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase : Union[str, Any] =1 lowercase : Optional[Any] =DeiTForImageClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : str =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase : List[str] =model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : Optional[Any] =self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Optional[Any] =config_and_inputs lowercase : Tuple ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) lowerCamelCase_ = ( { 'feature-extraction': DeiTModel, 'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : str =DeiTModelTester(self ) lowercase : Optional[int] =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def lowerCamelCase_ ( self : int ): '''simple docstring''' pass def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase , lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Any =model_class(UpperCAmelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase : List[Any] =model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase , lowercase : str =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Dict =model_class(UpperCAmelCase__ ) lowercase : List[Any] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase : Optional[Any] =[*signature.parameters.keys()] lowercase : Any =['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any=False ): '''simple docstring''' lowercase : Tuple =super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowerCamelCase_ ( self : str ): '''simple docstring''' if not self.model_tester.is_training: return lowercase , lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common() lowercase : Dict =True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(UpperCAmelCase__ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue lowercase : Any =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.train() lowercase : Dict =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) lowercase : List[Any] =model(**UpperCAmelCase__ ).loss loss.backward() def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase , lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return lowercase : Optional[Any] =False lowercase : Dict =True for model_class in self.all_model_classes: if model_class in get_values(UpperCAmelCase__ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue lowercase : Any =model_class(UpperCAmelCase__ ) model.gradient_checkpointing_enable() model.to(UpperCAmelCase__ ) model.train() lowercase : str =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) lowercase : Optional[Any] =model(**UpperCAmelCase__ ).loss loss.backward() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase , lowercase : str =self.model_tester.prepare_config_and_inputs_for_common() lowercase : Optional[int] =[ {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float}, {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long}, {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ): lowercase : Optional[Any] =problem_type['''title'''] lowercase : int =problem_type['''num_labels'''] lowercase : int =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.train() lowercase : str =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if problem_type["num_labels"] > 1: lowercase : List[str] =inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] ) lowercase : int =inputs['''labels'''].to(problem_type['''dtype'''] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=UpperCAmelCase__ ) as warning_list: lowercase : int =model(**UpperCAmelCase__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def lowerCamelCase_ ( self : int ): '''simple docstring''' for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : Any =DeiTModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def _lowerCAmelCase ( ) -> List[str]: lowercase : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Any =DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to( UpperCAmelCase__ ) lowercase : Tuple =self.default_image_processor lowercase : Tuple =prepare_img() lowercase : Any =image_processor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ ) # forward pass with torch.no_grad(): lowercase : str =model(**UpperCAmelCase__ ) # verify the logits lowercase : List[Any] =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase__ ) lowercase : Any =torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : Union[str, Any] =DeiTModel.from_pretrained( '''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' ) lowercase : Optional[Any] =self.default_image_processor lowercase : List[Any] =prepare_img() lowercase : Union[str, Any] =image_processor(images=UpperCAmelCase__ , return_tensors='''pt''' ) lowercase : Optional[Any] =inputs.pixel_values.to(UpperCAmelCase__ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowercase : Optional[int] =model(UpperCAmelCase__ )
92
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCamelCase : str = ["pixel_values"] def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__(**lowerCamelCase__ ) A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384} A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ ) A : Optional[Any] = do_resize A : Dict = size # Default value set here for backwards compatibility where the value in config is None A : Dict = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : List[str] = do_rescale A : Tuple = rescale_factor A : Optional[int] = do_normalize A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ): A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ ) if "shortest_edge" not in size: raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' ) A : List[str] = size["""shortest_edge"""] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : int = int(shortest_edge / crop_pct ) A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ ) A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ ) else: # warping (no cropping) when evaluated at 384 or larger return resize( lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ): return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ): A : Dict = do_resize if do_resize is not None else self.do_resize A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct A : str = resample if resample is not None else self.resample A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor A : Dict = do_normalize if do_normalize is not None else self.do_normalize A : List[str] = image_mean if image_mean is not None else self.image_mean A : Optional[Any] = image_std if image_std is not None else self.image_std A : Optional[Any] = size if size is not None else self.size A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ ) A : Any = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("""crop_pct must be specified if size < 384.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images] if do_rescale: A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images] if do_normalize: A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images] A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images] A : Dict = {"""pixel_values""": images} return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
662
0
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :Optional[Any] = DiTPipeline __magic_name__ :Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __magic_name__ :Dict = PipelineTesterMixin.required_optional_params - { """latents""", """num_images_per_prompt""", """callback""", """callback_steps""", } __magic_name__ :List[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __magic_name__ :Dict = False def snake_case ( self ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ :str = TransformeraDModel( sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__UpperCAmelCase , activation_fn='gelu-approximate' , num_embeds_ada_norm=1_0_0_0 , norm_type='ada_norm_zero' , norm_elementwise_affine=__UpperCAmelCase , ) lowerCAmelCase__ :int = AutoencoderKL() lowerCAmelCase__ :Union[str, Any] = DDIMScheduler() lowerCAmelCase__ :List[Any] = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): '''simple docstring''' if str(__UpperCAmelCase ).startswith('mps' ): lowerCAmelCase__ :int = torch.manual_seed(__UpperCAmelCase ) else: lowerCAmelCase__ :List[str] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = 'cpu' lowerCAmelCase__ :str = self.get_dummy_components() lowerCAmelCase__ :Any = self.pipeline_class(**__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Tuple = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Dict = pipe(**__UpperCAmelCase ).images lowerCAmelCase__ :Optional[int] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 1_6, 1_6, 3) ) lowerCAmelCase__ :List[str] = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] ) lowerCAmelCase__ :Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__UpperCAmelCase , 1E-3 ) def snake_case ( self ): '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=__UpperCAmelCase , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def snake_case ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = torch.manual_seed(0 ) lowerCAmelCase__ :Optional[int] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) lowerCAmelCase__ :Union[str, Any] = ['vase', 'umbrella', 'white shark', 'white wolf'] lowerCAmelCase__ :Tuple = pipe.get_label_ids(__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=4_0 , output_type='np' ).images for word, image in zip(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Optional[Any] = load_numpy( F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" ) assert np.abs((expected_image - image).max() ) < 1E-2 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) lowerCAmelCase__ :Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) lowerCAmelCase__ :Optional[int] = ['vase', 'umbrella'] lowerCAmelCase__ :Optional[Any] = pipe.get_label_ids(__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = torch.manual_seed(0 ) lowerCAmelCase__ :int = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2_5 , output_type='np' ).images for word, image in zip(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Optional[int] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' F"/dit/{word}_512.npy" ) assert np.abs((expected_image - image).max() ) < 1E-1
93
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any: """simple docstring""" A : Dict = """backbone.""" if is_semantic else """""" A : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (f'''{prefix}cls_token''', """beit.embeddings.cls_token"""), (f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""), (f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""), (f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ("""mask_token""", """beit.embeddings.mask_token"""), ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) else: # layernorm + classification head rename_keys.extend( [ ("""fc_norm.weight""", """beit.pooler.layernorm.weight"""), ("""fc_norm.bias""", """beit.pooler.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any: """simple docstring""" for i in range(config.num_hidden_layers ): A : Dict = """backbone.""" if is_semantic else """""" # queries, keys and values A : Union[str, Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' ) A : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' ) A : Optional[int] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' ) A : int = in_proj_weight[ : config.hidden_size, : ] A : Any = q_bias A : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A : Tuple = in_proj_weight[ -config.hidden_size :, : ] A : Union[str, Any] = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained A : str = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' ) A : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' ) A : Dict = gamma_a A : Dict = gamma_a def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: """simple docstring""" A : List[str] = dct.pop(_lowerCAmelCase ) A : Optional[Any] = val def __UpperCamelCase ( ) -> List[str]: """simple docstring""" A : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" A : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> str: """simple docstring""" A : Dict = False if """rvlcdip""" in checkpoint_url else True A : Union[str, Any] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: A : Dict = 1024 A : List[Any] = 4096 A : int = 24 A : int = 16 # labels if "rvlcdip" in checkpoint_url: A : List[Any] = 16 A : List[Any] = """huggingface/label-files""" A : int = """rvlcdip-id2label.json""" A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) A : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} A : int = idalabel A : Union[str, Any] = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys A : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""] A : str = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase ) # load HuggingFace model A : Any = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase ) model.eval() model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image A : Any = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase ) A : int = prepare_img() A : Tuple = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ) A : str = encoding["""pixel_values"""] A : Tuple = model(_lowerCAmelCase ) A : Optional[int] = outputs.logits # verify logits A : Tuple = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected" Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowerCAmelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_lowerCAmelCase ) if push_to_hub: if has_lm_head: A : Any = """dit-base""" if """base""" in checkpoint_url else """dit-large""" else: A : List[Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip""" image_processor.push_to_hub( repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , ) model.push_to_hub( repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_:Optional[int] = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""", type=str, help="""URL to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", ) SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
662
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE = { 'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'], 'configuration_maskformer_swin': ['MaskFormerSwinConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE = ['MaskFormerFeatureExtractor'] SCREAMING_SNAKE_CASE = ['MaskFormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE = [ 'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'MaskFormerForInstanceSegmentation', 'MaskFormerModel', 'MaskFormerPreTrainedModel', ] SCREAMING_SNAKE_CASE = [ 'MaskFormerSwinBackbone', 'MaskFormerSwinModel', 'MaskFormerSwinPreTrainedModel', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure)
94
import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ): warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""", lowerCamelCase__, ) super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
662
0
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''') lowerCamelCase_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''} lowerCamelCase_ = '''>>zh<<''' lowerCamelCase_ = '''Helsinki-NLP/''' if is_torch_available(): lowerCamelCase_ = '''pt''' elif is_tf_available(): lowerCamelCase_ = '''tf''' else: lowerCamelCase_ = '''jax''' @require_sentencepiece class UpperCamelCase_ (__A , unittest.TestCase ): __magic_name__ = MarianTokenizer __magic_name__ = False __magic_name__ = True def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: super().setUp() UpperCAmelCase_ : List[Any] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] UpperCAmelCase_ : Tuple = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) UpperCAmelCase_ : Optional[Any] = Path(self.tmpdirname ) save_json(lowerCAmelCase_ , save_dir / VOCAB_FILES_NAMES["vocab"] ) save_json(lowerCAmelCase_ , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(lowerCAmelCase_ , save_dir / VOCAB_FILES_NAMES["source_spm"] ) copyfile(lowerCAmelCase_ , save_dir / VOCAB_FILES_NAMES["target_spm"] ) UpperCAmelCase_ : str = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowerCAmelCase_ : Tuple ) -> MarianTokenizer: return MarianTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]: return ( "This is a test", "This is a test", ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: UpperCAmelCase_ : List[str] = "</s>" UpperCAmelCase_ : Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(lowerCAmelCase_ ) , 9 ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: UpperCAmelCase_ : List[str] = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" ) UpperCAmelCase_ : Optional[int] = en_de_tokenizer(["I am a small frog"] , return_tensors=lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = [38, 121, 14, 697, 38_848, 0] self.assertListEqual(lowerCAmelCase_ , batch.input_ids[0] ) UpperCAmelCase_ : Optional[Any] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = [x.name for x in Path(lowerCAmelCase_ ).glob("*" )] self.assertIn("source.spm" , lowerCAmelCase_ ) MarianTokenizer.from_pretrained(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: UpperCAmelCase_ : Optional[int] = self.get_tokenizer() UpperCAmelCase_ : Tuple = tok( ["I am a small frog" * 1_000, "I am a small frog"] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: UpperCAmelCase_ : List[Any] = self.get_tokenizer() UpperCAmelCase_ : Any = tok(["I am a tiny frog", "I am a small frog"] , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: # fmt: off UpperCAmelCase_ : int = {"input_ids": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase_ , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: UpperCAmelCase_ : int = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" ) UpperCAmelCase_ : Dict = "Tämä on testi" UpperCAmelCase_ : Optional[Any] = "This is a test" UpperCAmelCase_ : Union[str, Any] = [76, 7, 2_047, 2] UpperCAmelCase_ : Optional[int] = [69, 12, 11, 940, 2] UpperCAmelCase_ : int = tokenizer(lowerCAmelCase_ ).input_ids self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer(text_target=lowerCAmelCase_ ).input_ids self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
95
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__( lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, ) A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths} A : str = Text( cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, ) def _lowerCAmelCase ( self ): # Build iterable dataset if self.streaming: A : int = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A : List[str] = None A : Dict = None A : Tuple = None A : Tuple = None self.builder.download_and_prepare( download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, ) A : List[str] = self.builder.as_dataset( split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory ) return dataset
662
0
"""simple docstring""" import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def a ( __UpperCAmelCase : List[Any] ) -> str: __magic_name__: Union[str, Any] = args.pruning_method __magic_name__: Tuple = args.threshold __magic_name__: Dict = args.model_name_or_path.rstrip("""/""" ) __magic_name__: str = args.target_model_path print(f'Load fine-pruned model from {model_name_or_path}' ) __magic_name__: Optional[Any] = torch.load(os.path.join(__UpperCAmelCase , """pytorch_model.bin""" ) ) __magic_name__: int = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: __magic_name__: List[Any] = tensor print(f'Copied layer {name}' ) elif "classifier" in name or "qa_output" in name: __magic_name__: List[str] = tensor print(f'Copied layer {name}' ) elif "bias" in name: __magic_name__: Union[str, Any] = tensor print(f'Copied layer {name}' ) else: if pruning_method == "magnitude": __magic_name__: str = MagnitudeBinarizer.apply(inputs=__UpperCAmelCase , threshold=__UpperCAmelCase ) __magic_name__: Tuple = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "topK": if "mask_scores" in name: continue __magic_name__: str = name[:-6] __magic_name__: Dict = model[f'{prefix_}mask_scores'] __magic_name__: Any = TopKBinarizer.apply(__UpperCAmelCase , __UpperCAmelCase ) __magic_name__: List[Any] = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue __magic_name__: List[Any] = name[:-6] __magic_name__: List[Any] = model[f'{prefix_}mask_scores'] __magic_name__: Tuple = ThresholdBinarizer.apply(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __magic_name__: int = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "l0": if "mask_scores" in name: continue __magic_name__: Dict = name[:-6] __magic_name__: int = model[f'{prefix_}mask_scores'] __magic_name__, __magic_name__: Tuple = -0.1, 1.1 __magic_name__: Tuple = torch.sigmoid(__UpperCAmelCase ) __magic_name__: Any = s * (r - l) + l __magic_name__: Optional[int] = s_bar.clamp(min=0.0 , max=1.0 ) __magic_name__: Union[str, Any] = tensor * mask print(f'Pruned layer {name}' ) else: raise ValueError("""Unknown pruning method""" ) if target_model_path is None: __magic_name__: List[str] = os.path.join( os.path.dirname(__UpperCAmelCase ) , f'bertarized_{os.path.basename(__UpperCAmelCase )}' ) if not os.path.isdir(__UpperCAmelCase ): shutil.copytree(__UpperCAmelCase , __UpperCAmelCase ) print(f'\nCreated folder {target_model_path}' ) torch.save(__UpperCAmelCase , os.path.join(__UpperCAmelCase , """pytorch_model.bin""" ) ) print("""\nPruned model saved! See you later!""" ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() parser.add_argument( '--pruning_method', choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'], type=str, required=True, help=( 'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,' ' sigmoied_threshold = Soft movement pruning)' ), ) parser.add_argument( '--threshold', type=float, required=False, help=( 'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.' 'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.' 'Not needed for `l0`' ), ) parser.add_argument( '--model_name_or_path', type=str, required=True, help='Folder containing the model that was previously fine-pruned', ) parser.add_argument( '--target_model_path', default=None, type=str, required=False, help='Folder containing the model that was previously fine-pruned', ) __lowerCamelCase = parser.parse_args() main(args)
96
from typing import TYPE_CHECKING from ....utils import _LazyModule SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]} if TYPE_CHECKING: from .tokenization_tapex import TapexTokenizer else: import sys SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
662
0
from __future__ import annotations import math def a ( snake_case__: list , snake_case__: list ): '''simple docstring''' if len(snake_case__ ) != 2 or len(a[0] ) != 2 or len(snake_case__ ) != 2 or len(b[0] ) != 2: raise Exception('''Matrices are not 2x2''' ) lowercase_ = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def a ( snake_case__: list , snake_case__: list ): '''simple docstring''' return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(snake_case__ ) ) ] def a ( snake_case__: list , snake_case__: list ): '''simple docstring''' return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(snake_case__ ) ) ] def a ( snake_case__: list ): '''simple docstring''' if len(snake_case__ ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception('''Odd matrices are not supported!''' ) lowercase_ = len(snake_case__ ) lowercase_ = matrix_length // 2 lowercase_ = [[a[i][j] for j in range(snake_case__ , snake_case__ )] for i in range(snake_case__ )] lowercase_ = [ [a[i][j] for j in range(snake_case__ , snake_case__ )] for i in range(snake_case__ , snake_case__ ) ] lowercase_ = [[a[i][j] for j in range(snake_case__ )] for i in range(snake_case__ )] lowercase_ = [[a[i][j] for j in range(snake_case__ )] for i in range(snake_case__ , snake_case__ )] return top_left, top_right, bot_left, bot_right def a ( snake_case__: list ): '''simple docstring''' return len(snake_case__ ), len(matrix[0] ) def a ( snake_case__: list ): '''simple docstring''' print('''\n'''.join(str(snake_case__ ) for line in matrix ) ) def a ( snake_case__: list , snake_case__: list ): '''simple docstring''' if matrix_dimensions(snake_case__ ) == (2, 2): return default_matrix_multiplication(snake_case__ , snake_case__ ) lowercase_ , lowercase_ , lowercase_ , lowercase_ = split_matrix(snake_case__ ) lowercase_ , lowercase_ , lowercase_ , lowercase_ = split_matrix(snake_case__ ) lowercase_ = actual_strassen(snake_case__ , matrix_subtraction(snake_case__ , snake_case__ ) ) lowercase_ = actual_strassen(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ ) lowercase_ = actual_strassen(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ ) lowercase_ = actual_strassen(snake_case__ , matrix_subtraction(snake_case__ , snake_case__ ) ) lowercase_ = actual_strassen(matrix_addition(snake_case__ , snake_case__ ) , matrix_addition(snake_case__ , snake_case__ ) ) lowercase_ = actual_strassen(matrix_subtraction(snake_case__ , snake_case__ ) , matrix_addition(snake_case__ , snake_case__ ) ) lowercase_ = actual_strassen(matrix_subtraction(snake_case__ , snake_case__ ) , matrix_addition(snake_case__ , snake_case__ ) ) lowercase_ = matrix_addition(matrix_subtraction(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ ) , snake_case__ ) lowercase_ = matrix_addition(snake_case__ , snake_case__ ) lowercase_ = matrix_addition(snake_case__ , snake_case__ ) lowercase_ = matrix_subtraction(matrix_subtraction(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ ) , snake_case__ ) # construct the new matrix from our 4 quadrants lowercase_ = [] for i in range(len(snake_case__ ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(snake_case__ ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def a ( snake_case__: list , snake_case__: list ): '''simple docstring''' if matrix_dimensions(snake_case__ )[1] != matrix_dimensions(snake_case__ )[0]: lowercase_ = ( '''Unable to multiply these matrices, please check the dimensions.\n''' F'''Matrix A: {matrixa}\n''' F'''Matrix B: {matrixa}''' ) raise Exception(snake_case__ ) lowercase_ = matrix_dimensions(snake_case__ ) lowercase_ = matrix_dimensions(snake_case__ ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] lowercase_ = max(*snake_case__ , *snake_case__ ) lowercase_ = int(math.pow(2 , math.ceil(math.loga(snake_case__ ) ) ) ) lowercase_ = matrixa lowercase_ = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , snake_case__ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , snake_case__ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , snake_case__ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) lowercase_ = actual_strassen(snake_case__ , snake_case__ ) # Removing the additional zeros for i in range(0 , snake_case__ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , snake_case__ ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": __a = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] __a = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]] print(strassen(matrixa, matrixa))
97
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int: """simple docstring""" A , A : str = 1, 1 A : List[Any] = [] for i in range(1 , n + 1 ): A : Optional[int] = prev_numerator + 2 * prev_denominator A : Any = prev_numerator + prev_denominator if len(str(_lowerCAmelCase ) ) > len(str(_lowerCAmelCase ) ): result.append(_lowerCAmelCase ) A : int = numerator A : int = denominator return len(_lowerCAmelCase ) if __name__ == "__main__": print(F"""{solution() = }""")
662
0
'''simple docstring''' import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration lowercase__ : Optional[int] = 50_00_00 lowercase__ , lowercase__ : List[str] = os.path.split(__file__) lowercase__ : str = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json')) @get_duration def a__ ( lowercase : datasets.Dataset, **lowercase : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = dataset.map(**lowercase ) @get_duration def a__ ( lowercase : datasets.Dataset, **lowercase : Optional[Any] ) -> str: """simple docstring""" _UpperCamelCase = dataset.filter(**lowercase ) def a__ ( ) -> Any: """simple docstring""" _UpperCamelCase = {'''num examples''': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: _UpperCamelCase = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} ) _UpperCamelCase = generate_example_dataset( os.path.join(lowercase, '''dataset.arrow''' ), lowercase, num_examples=lowercase ) _UpperCamelCase = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''', use_fast=lowercase ) def tokenize(lowercase : List[Any] ): return tokenizer(examples['''text'''] ) _UpperCamelCase = map(lowercase ) _UpperCamelCase = map(lowercase, batched=lowercase ) _UpperCamelCase = map(lowercase, function=lambda lowercase : None, batched=lowercase ) with dataset.formatted_as(type='''numpy''' ): _UpperCamelCase = map(lowercase, function=lambda lowercase : None, batched=lowercase ) with dataset.formatted_as(type='''pandas''' ): _UpperCamelCase = map(lowercase, function=lambda lowercase : None, batched=lowercase ) with dataset.formatted_as(type='''torch''', columns='''numbers''' ): _UpperCamelCase = map(lowercase, function=lambda lowercase : None, batched=lowercase ) with dataset.formatted_as(type='''tensorflow''', columns='''numbers''' ): _UpperCamelCase = map(lowercase, function=lambda lowercase : None, batched=lowercase ) _UpperCamelCase = map(lowercase, function=lowercase, batched=lowercase ) _UpperCamelCase = filter(lowercase ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(lowercase, '''wb''' ) as f: f.write(json.dumps(lowercase ).encode('''utf-8''' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
98
import re def __UpperCamelCase ( _lowerCAmelCase ) -> str: """simple docstring""" if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ): raise ValueError("""Invalid Strand""" ) return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) ) if __name__ == "__main__": import doctest doctest.testmod()
662
0
import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class __UpperCAmelCase : """simple docstring""" def __init__( self , __A ): if isinstance(__A , __A ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden __a = deepcopy(__A ) elif os.path.exists(__A ): with io.open(__A , """r""" , encoding="""utf-8""" ) as f: __a = json.load(__A ) else: try: __a = baseaa.urlsafe_baadecode(__A ).decode("""utf-8""" ) __a = json.loads(__A ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' ) __a = config self.set_stage_and_offload() def snake_case_ ( self ): # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. __a = self.get_value("""zero_optimization.stage""" , -1 ) # offload __a = False if self.is_zeroa() or self.is_zeroa(): __a = set(["""cpu""", """nvme"""] ) __a = set( [ self.get_value("""zero_optimization.offload_optimizer.device""" ), self.get_value("""zero_optimization.offload_param.device""" ), ] ) if len(offload_devices & offload_devices_valid ) > 0: __a = True def snake_case_ ( self , __A ): __a = self.config # find the config node of interest if it exists __a = ds_key_long.split(""".""" ) __a = nodes.pop() for node in nodes: __a = config.get(__A ) if config is None: return None, ds_key return config, ds_key def snake_case_ ( self , __A , __A=None ): __a , __a = self.find_config_node(__A ) if config is None: return default return config.get(__A , __A ) def snake_case_ ( self , __A , __A=False ): __a = self.config # find the config node of interest if it exists __a = ds_key_long.split(""".""" ) for node in nodes: __a = config __a = config.get(__A ) if config is None: if must_exist: raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' ) else: return # if found remove it if parent_config is not None: parent_config.pop(__A ) def snake_case_ ( self , __A ): __a = self.get_value(__A ) return False if value is None else bool(__A ) def snake_case_ ( self , __A ): __a = self.get_value(__A ) return False if value is None else not bool(__A ) def snake_case_ ( self ): return self._stage == 2 def snake_case_ ( self ): return self._stage == 3 def snake_case_ ( self ): return self._offload class __UpperCAmelCase : """simple docstring""" def __init__( self , __A ): __a = engine def snake_case_ ( self , __A , **__A ): # runs backpropagation and handles mixed precision self.engine.backward(__A , **__A ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class __UpperCAmelCase ( __A ): """simple docstring""" def __init__( self , __A ): super().__init__(__A , device_placement=__A , scaler=__A ) __a = hasattr(self.optimizer , """overflow""" ) def snake_case_ ( self , __A=None ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def snake_case_ ( self ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def snake_case_ ( self ): if self.__has_overflow__: return self.optimizer.overflow return False class __UpperCAmelCase ( __A ): """simple docstring""" def __init__( self , __A , __A ): super().__init__(__A , __A ) def snake_case_ ( self ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class __UpperCAmelCase : """simple docstring""" def __init__( self , __A , __A=0.001 , __A=0 , **__A ): __a = params __a = lr __a = weight_decay __a = kwargs class __UpperCAmelCase : """simple docstring""" def __init__( self , __A , __A=None , __A=0 , **__A ): __a = optimizer __a = total_num_steps __a = warmup_num_steps __a = kwargs
99
from __future__ import annotations SCREAMING_SNAKE_CASE_:Tuple = """#""" class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self ): A : dict = {} def _lowerCAmelCase ( self, lowerCamelCase__ ): A : List[Any] = self._trie for char in text: if char not in trie: A : str = {} A : str = trie[char] A : Optional[int] = True def _lowerCAmelCase ( self, lowerCamelCase__ ): A : Dict = self._trie for char in prefix: if char in trie: A : Optional[Any] = trie[char] else: return [] return self._elements(lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__ ): A : int = [] for c, v in d.items(): A : List[Any] = [""" """] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )] result.extend(lowerCamelCase__ ) return tuple(lowerCamelCase__ ) SCREAMING_SNAKE_CASE_:Any = Trie() SCREAMING_SNAKE_CASE_:Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""") for word in words: trie.insert_word(word) def __UpperCamelCase ( _lowerCAmelCase ) -> tuple: """simple docstring""" A : List[str] = trie.find_word(_lowerCAmelCase ) return tuple(string + word for word in suffixes ) def __UpperCamelCase ( ) -> None: """simple docstring""" print(autocomplete_using_trie("""de""" ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
662
0