Dataset schema (one row per paired sample):

| Column                    | Type   | Value range          |
|---------------------------|--------|----------------------|
| `code`                    | string | lengths 87 – 55.2k   |
| `code_codestyle`          | int64  | 0 – 349              |
| `style_context`           | string | lengths 135 – 49.1k  |
| `style_context_codestyle` | int64  | 0 – 349              |
| `label`                   | int64  | 0 – 1                |
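Read as a pair-classification layout, each row appears to hold a code sample with its style-class id, a second sample (`style_context`) with its style-class id, and a binary `label` — presumably 1 when the two samples share a style class, as in the rows below, where both ids are 40 and the label is 1. A minimal sketch of consuming such a schema with the `datasets` library; the repository path is a placeholder, not the dataset's real name:

```python
from datasets import load_dataset

# Placeholder repository path -- substitute the actual dataset name.
ds = load_dataset("org/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]), row["code_codestyle"])                    # sample text and its style-class id
print(len(row["style_context"]), row["style_context_codestyle"])  # paired sample and its style-class id
print(row["label"])  # presumably 1 when the two style-class ids match
```

Sample rows follow, with each code field reflowed into a readable block.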
"""simple docstring""" import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def lowercase ( A_="" )-> str: '''simple docstring''' a : Tuple = tempfile.mkdtemp() return os.path.join(A_ , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[Any]): a : Union[str, Any] = torch.rand(12 , dtype=torch.floataa) - 0.5 a : Any = AgentAudio(__UpperCAmelCase) a : List[str] = str(agent_type.to_string()) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(__UpperCAmelCase , agent_type.to_raw() , atol=1e-4)) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(__UpperCAmelCase)) # Ensure that the file contains the same value as the original tensor a , a : Union[str, Any] = sf.read(__UpperCAmelCase) self.assertTrue(torch.allclose(__UpperCAmelCase , torch.tensor(__UpperCAmelCase) , atol=1e-4)) def __snake_case ( self : Optional[Any]): a : str = torch.rand(12 , dtype=torch.floataa) - 0.5 a : int = get_new_path(suffix=".wav") sf.write(__UpperCAmelCase , __UpperCAmelCase , 16000) a : List[str] = AgentAudio(__UpperCAmelCase) self.assertTrue(torch.allclose(__UpperCAmelCase , agent_type.to_raw() , atol=1e-4)) self.assertEqual(agent_type.to_string() , __UpperCAmelCase) @require_vision @require_torch class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[str]): a : Dict = torch.randint(0 , 256 , (64, 64, 3)) a : str = AgentImage(__UpperCAmelCase) a : str = str(agent_type.to_string()) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(__UpperCAmelCase , agent_type._tensor , atol=1e-4)) self.assertIsInstance(agent_type.to_raw() , Image.Image) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(__UpperCAmelCase)) def __snake_case ( self : int): a : Union[str, Any] = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png" a : int = Image.open(__UpperCAmelCase) a : Any = AgentImage(__UpperCAmelCase) self.assertTrue(path.samefile(agent_type.to_string())) self.assertTrue(image == agent_type.to_raw()) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(__UpperCAmelCase)) def __snake_case ( self : str): a : int = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png" a : str = Image.open(__UpperCAmelCase) a : Dict = AgentImage(__UpperCAmelCase) self.assertFalse(path.samefile(agent_type.to_string())) self.assertTrue(image == agent_type.to_raw()) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(__UpperCAmelCase)) class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): a : Dict = "Hey!" a : Tuple = AgentText(__UpperCAmelCase) self.assertEqual(__UpperCAmelCase , agent_type.to_string()) self.assertEqual(__UpperCAmelCase , agent_type.to_raw()) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
40
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _a ,_a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = StableDiffusionInpaintPipeline UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase : Union[str, Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase : int = frozenset([] ) def __snake_case ( self : Dict): torch.manual_seed(0) a : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , ) a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase) torch.manual_seed(0) a : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) a : Any = CLIPTextModel(__UpperCAmelCase) a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0] a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64)) a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64)) if str(__UpperCAmelCase).startswith("mps"): a : Tuple = torch.manual_seed(__UpperCAmelCase) else: a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __snake_case ( self : List[str]): a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent 
torch.Generator a : Tuple = self.get_dummy_components() a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase) a : int = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Any = self.get_dummy_inputs(__UpperCAmelCase) a : Optional[int] = sd_pipe(**__UpperCAmelCase).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : int = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __snake_case ( self : str): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Dict): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy") a : Tuple = "stabilityai/stable-diffusion-2-inpainting" a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Any = "Face of a yellow cat, high resolution, sitting on a park bench" a : str = torch.manual_seed(0) a : Union[str, Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def __snake_case ( self : Any): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Any = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Dict = torch.manual_seed(0) a : List[Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : Optional[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def __snake_case ( self : int): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[int] = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler") a : int = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Optional[int] = torch.manual_seed(0) a : str = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="np" , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
40
1
"""simple docstring""" def lowercase ( A_ = 200 )-> int: '''simple docstring''' a : Union[str, Any] = [1, 2, 5, 10, 20, 50, 100, 200] a : Optional[int] = [0] * (pence + 1) a : Tuple = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(A_ , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 73682
40
"""simple docstring""" def lowercase ( A_ )-> bool: '''simple docstring''' if not all(x.isalpha() for x in string ): raise ValueError("String must only contain alphabetic characters." ) a : Tuple = sorted(string.lower() ) return len(A_ ) == len(set(A_ ) ) if __name__ == "__main__": __lowercase = input("""Enter a string """).strip() __lowercase = is_isogram(input_str) print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
40
1
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) def lowercase ( A_ , A_ , A_ , A_ )-> str: '''simple docstring''' a : Dict = original_name.split("." )[0] a : Any = key.split("." ) a : Optional[Any] = int(key_list[key_list.index(A_ ) - 2] ) a : Union[str, Any] = int(key_list[key_list.index(A_ ) - 1] ) a : Optional[Any] = orig_block_num - offset a : Optional[int] = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' ) return key def lowercase ( A_ )-> Optional[int]: '''simple docstring''' a : Union[str, Any] = OrderedDict() a , a : str = 0, 0 for key, value in state_dict.items(): if key.startswith("network" ): a : Union[str, Any] = key.replace("network" , "poolformer.encoder" ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith("bias" ) and "patch_embed" not in key: patch_emb_offset += 1 a : List[str] = key[: key.find("proj" )] a : List[Any] = key.replace(A_ , F'''patch_embeddings.{total_embed_found}.''' ) a : Any = key.replace("proj" , "projection" ) if key.endswith("bias" ): total_embed_found += 1 if "patch_embeddings" in key: a : int = "poolformer.encoder." + key if "mlp.fc1" in key: a : Optional[Any] = replace_key_with_offset(A_ , A_ , "mlp.fc1" , "output.conv1" ) if "mlp.fc2" in key: a : Tuple = replace_key_with_offset(A_ , A_ , "mlp.fc2" , "output.conv2" ) if "norm1" in key: a : str = replace_key_with_offset(A_ , A_ , "norm1" , "before_norm" ) if "norm2" in key: a : str = replace_key_with_offset(A_ , A_ , "norm2" , "after_norm" ) if "layer_scale_1" in key: a : Any = replace_key_with_offset(A_ , A_ , "layer_scale_1" , "layer_scale_1" ) if "layer_scale_2" in key: a : Optional[Any] = replace_key_with_offset(A_ , A_ , "layer_scale_2" , "layer_scale_2" ) if "head" in key: a : Tuple = key.replace("head" , "classifier" ) a : Any = value return new_state_dict def lowercase ( )-> Optional[int]: '''simple docstring''' a : Any = "http://images.cocodataset.org/val2017/000000039769.jpg" a : Optional[int] = Image.open(requests.get(A_ , stream=A_ ).raw ) return image @torch.no_grad() def lowercase ( A_ , A_ , A_ )-> Optional[int]: '''simple docstring''' a : int = PoolFormerConfig() # set attributes based on model_name a : Dict = "huggingface/label-files" a : str = model_name[-3:] a : Tuple = 1_000 a : List[Any] = "imagenet-1k-id2label.json" a : int = (1, 1_000) # set config attributes a : int = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) ) a : Any = {int(A_ ): v for k, v in idalabel.items()} a : str = idalabel a : List[Any] = {v: k for k, v in idalabel.items()} if size == "s12": a : Optional[Any] = [2, 2, 6, 2] a : Any = [64, 128, 320, 512] a : Any = 4.0 a : Optional[int] = 0.9 elif size == "s24": a : Tuple = [4, 4, 12, 4] a : Optional[int] = [64, 128, 320, 512] a : Any = 4.0 a : List[Any] = 0.9 elif size == "s36": a : List[str] = [6, 6, 18, 6] a : Dict = [64, 128, 320, 512] a : Optional[Any] = 4.0 a : Union[str, Any] = 1e-6 a : List[Any] = 0.9 elif size == "m36": a : Union[str, Any] = [6, 6, 18, 6] a : Any = [96, 192, 384, 768] a : Union[str, Any] = 4.0 a : Tuple = 1e-6 a : Dict = 0.9_5 
elif size == "m48": a : Tuple = [8, 8, 24, 8] a : Optional[int] = [96, 192, 384, 768] a : Tuple = 4.0 a : Dict = 1e-6 a : Any = 0.9_5 else: raise ValueError(F'''Size {size} not supported''' ) # load image processor a : List[str] = PoolFormerImageProcessor(crop_pct=A_ ) # Prepare image a : List[Any] = prepare_img() a : Tuple = image_processor(images=A_ , return_tensors="pt" ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict a : Optional[int] = torch.load(A_ , map_location=torch.device("cpu" ) ) # rename keys a : Any = rename_keys(A_ ) # create HuggingFace model and load state dict a : Any = PoolFormerForImageClassification(A_ ) model.load_state_dict(A_ ) model.eval() # Define image processor a : List[Any] = PoolFormerImageProcessor(crop_pct=A_ ) a : int = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values # forward pass a : Optional[int] = model(A_ ) a : Any = outputs.logits # define expected logit slices for different models if size == "s12": a : Tuple = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] ) elif size == "s24": a : Tuple = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] ) elif size == "s36": a : Optional[Any] = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] ) elif size == "m36": a : List[str] = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] ) elif size == "m48": a : Dict = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] ) else: raise ValueError(F'''Size {size} not supported''' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , A_ , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(A_ ).mkdir(exist_ok=A_ ) model.save_pretrained(A_ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A_ ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""poolformer_s12""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) __lowercase = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
40
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __lowercase = datasets.utils.logging.get_logger(__name__) @dataclass class _A ( datasets.BuilderConfig ): """simple docstring""" UpperCAmelCase : int = 1_0_0_0_0 UpperCAmelCase : Optional[List[str]] = None UpperCAmelCase : Optional[datasets.Features] = None class _A ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCAmelCase : str = ParquetConfig def __snake_case ( self : Tuple): return datasets.DatasetInfo(features=self.config.features) def __snake_case ( self : List[Any] , __UpperCAmelCase : str): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''') a : str = dl_manager.download_and_extract(self.config.data_files) if isinstance(__UpperCAmelCase , (str, list, tuple)): a : Dict = data_files if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : str = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})] a : Dict = [] for split_name, files in data_files.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__UpperCAmelCase): with open(__UpperCAmelCase , "rb") as f: a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase)) break splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files})) return splits def __snake_case ( self : List[str] , __UpperCAmelCase : pa.Table): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema) return pa_table def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int): a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema) != sorted(self.config.columns): raise ValueError( f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''') for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)): with open(__UpperCAmelCase , "rb") as f: a : Tuple = pq.ParquetFile(__UpperCAmelCase) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)): a : Optional[Any] = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase) except ValueError as e: logger.error(f'''Failed to read file \'{file}\' with error 
{type(__UpperCAmelCase)}: {e}''') raise
40
1
"""simple docstring""" import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": __lowercase = argparse.ArgumentParser( description=( """Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned""" """ Distillation""" ) ) parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""]) parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str) parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str) parser.add_argument("""--vocab_transform""", action="""store_true""") __lowercase = parser.parse_args() if args.model_type == "bert": __lowercase = BertForMaskedLM.from_pretrained(args.model_name) __lowercase = """bert""" else: raise ValueError("""args.model_type should be \"bert\".""") __lowercase = model.state_dict() __lowercase = {} for w in ["word_embeddings", "position_embeddings"]: __lowercase = state_dict[f'''{prefix}.embeddings.{w}.weight'''] for w in ["weight", "bias"]: __lowercase = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}'''] __lowercase = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: __lowercase = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}''' ] __lowercase = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}''' ] __lowercase = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}''' ] __lowercase = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}''' ] __lowercase = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}''' ] __lowercase = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}''' ] __lowercase = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}''' ] __lowercase = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}''' ] std_idx += 1 __lowercase = state_dict["""cls.predictions.decoder.weight"""] __lowercase = state_dict["""cls.predictions.bias"""] if args.vocab_transform: for w in ["weight", "bias"]: __lowercase = state_dict[f'''cls.predictions.transform.dense.{w}'''] __lowercase = state_dict[f'''cls.predictions.transform.LayerNorm.{w}'''] print(f'''N layers selected for distillation: {std_idx}''') print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
40
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json""" ), } class _A ( _a ): """simple docstring""" UpperCAmelCase : int = """dpr""" def __init__( self : List[Any] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : int = 0 , **__UpperCAmelCase : Tuple , ): super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase) a : List[Any] = vocab_size a : Optional[Any] = hidden_size a : Union[str, Any] = num_hidden_layers a : Dict = num_attention_heads a : int = hidden_act a : Any = intermediate_size a : Any = hidden_dropout_prob a : Dict = attention_probs_dropout_prob a : Any = max_position_embeddings a : Union[str, Any] = type_vocab_size a : Optional[Any] = initializer_range a : Dict = layer_norm_eps a : int = projection_dim a : str = position_embedding_type
40
1
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowercase ( A_=32 , A_=10 , A_=100 , A_=1_026 , A_=True , A_="data/tokenized_stories_train_wikitext103.jbl" , A_="igf_context_pairs.jbl" , )-> List[str]: '''simple docstring''' set_seed(3 ) # generate train_data and objective_set a , a : Dict = generate_datasets( A_ , A_ , number=A_ , min_len=1_026 , trim=A_ ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? a : List[Any] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # load pretrained model a : Union[str, Any] = load_gpta("gpt2" ).to(A_ ) print("computing perplexity on objective set" ) a : int = compute_perplexity(A_ , A_ , A_ ).item() print("perplexity on objective set:" , A_ ) # collect igf pairs and save to file demo.jbl collect_objective_set(A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowercase ( A_ , A_=15 , A_=128 , A_=100 , A_="igf_model.pt" , )-> Any: '''simple docstring''' set_seed(42 ) # Load pre-trained model a : int = GPTaLMHeadModel.from_pretrained("gpt2" ) # Initialize secondary learner to use embedding weights of model a : int = SecondaryLearner(A_ ) # Train secondary learner a : Optional[Any] = train_secondary_learner( A_ , A_ , max_epochs=A_ , batch_size=A_ , eval_freq=100 , igf_model_path=A_ , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowercase ( A_ , A_ , A_ , A_=32 , A_=1_000 , A_=16 , A_=1.0 , A_=recopy_gpta , A_=None , A_=10 , A_="gpt2_finetuned.pt" , )-> Union[str, Any]: '''simple docstring''' a : Optional[Any] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) a : List[str] = RandomSampler(A_ ) a : Tuple = DataLoader(A_ , sampler=A_ ) a : Union[str, Any] = max_steps // (len(A_ )) + 1 a : List[Any] = 0 a : str = torch.zeros((1, context_len) , dtype=torch.long , device=A_ ) a , a , a : Dict = recopy_model(A_ , A_ , A_ ) model.train() if secondary_learner is not None: secondary_learner.to(A_ ) secondary_learner.eval() a : Tuple = [] a : List[Any] = 0 a : str = [] a : Optional[Any] = [] # Compute the performance of the transformer model at the beginning a : Union[str, Any] = compute_perplexity(A_ , A_ , A_ ) test_perps.append(A_ ) print("Test perplexity, step" , A_ , ":" , A_ ) for epoch in range(int(A_ ) ): for step, example in enumerate(A_ ): torch.cuda.empty_cache() a : str = random.randint(0 , example.size(2 ) - context_len - 1 ) a : List[Any] = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() a : List[str] = model(A_ , labels=A_ ) a : Optional[Any] = True if secondary_learner is not None: a : Tuple = secondary_learner.forward( torch.tensor(A_ , dtype=torch.long , device=A_ ).unsqueeze(0 ) )[0].item() observed_qs.append(float(A_ ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. 
if global_step == 10: a : List[Any] = -1 if predicted_q < threshold: a : Tuple = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) a : Union[str, Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() a : Dict = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: a : Dict = compute_perplexity(A_ , A_ , A_ ) test_perps.append(A_ ) print("Test perplexity, step" , A_ , ":" , A_ ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , A_ ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowercase ( )-> Optional[int]: '''simple docstring''' a : Any = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" ) # Required parameters parser.add_argument( "--data_dir" , default=A_ , type=A_ , required=A_ , help="The input data dir. Should contain data files for WikiText." , ) parser.add_argument( "--model_name_or_path" , default=A_ , type=A_ , required=A_ , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--data_file" , type=A_ , default=A_ , help=( "A jbl file containing tokenized data which can be split as objective dataset, " "train_dataset and test_dataset." ) , ) parser.add_argument( "--igf_data_file" , type=A_ , default=A_ , help="A jbl file containing the context and information gain pairs to train secondary learner." , ) parser.add_argument( "--output_dir" , default=A_ , type=A_ , required=A_ , help="The output directory where the final fine-tuned model is stored." , ) parser.add_argument( "--tokenizer_name" , default=A_ , type=A_ , help="Pretrained tokenizer name or path if not the same as model_name" , ) parser.add_argument("--seed" , type=A_ , default=A_ , help="A seed for reproducible training." ) parser.add_argument( "--context_len" , default=32 , type=A_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." 
) , ) parser.add_argument( "--size_objective_set" , default=100 , type=A_ , help="number of articles that are long enough to be used as our objective set" , ) parser.add_argument( "--eval_freq" , default=100 , type=A_ , help="secondary model evaluation is triggered at eval_freq" ) parser.add_argument("--max_steps" , default=1_000 , type=A_ , help="To calculate training epochs" ) parser.add_argument( "--secondary_learner_batch_size" , default=128 , type=A_ , help="batch size of training data for secondary learner" , ) parser.add_argument( "--batch_size" , default=16 , type=A_ , help="batch size of training data of language model(gpt2) " ) parser.add_argument( "--eval_interval" , default=10 , type=A_ , help=( "decay the selectivity of our secondary learner filter from" "1 standard deviation above average to 1 below average after 10 batches" ) , ) parser.add_argument( "--number" , default=100 , type=A_ , help="The number of examples split to be used as objective_set/test_data" ) parser.add_argument( "--min_len" , default=1_026 , type=A_ , help="The minimum length of the article to be used as objective set" ) parser.add_argument( "--secondary_learner_max_epochs" , default=15 , type=A_ , help="number of epochs to train secondary learner" ) parser.add_argument("--trim" , default=A_ , type=A_ , help="truncate the example if it exceeds context length" ) parser.add_argument( "--threshold" , default=1.0 , type=A_ , help=( "The threshold value used by secondary learner to filter the train_data and allow only" " informative data as input to the model" ) , ) parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=A_ , help="finetuned_model_name" ) parser.add_argument( "--recopy_model" , default=A_ , type=A_ , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=A_ , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ) # Load train data for secondary learner a : Union[str, Any] = joblib.load("data/IGF_values.jbl" ) # Train secondary learner a : Optional[Any] = training_secondary_learner( A_ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ) # load pretrained gpt2 model a : List[str] = GPTaLMHeadModel.from_pretrained("gpt2" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model a , a : str = generate_datasets( context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1_026 , trim=A_ ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( A_ , A_ , A_ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=A_ , secondary_learner=A_ , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ) if __name__ == "__main__": main()
40
"""simple docstring""" class _A : """simple docstring""" def __init__( self : int , __UpperCAmelCase : int): a : Tuple = size a : Dict = [0] * size a : Optional[int] = [0] * size @staticmethod def __snake_case ( __UpperCAmelCase : int): return index | (index + 1) @staticmethod def __snake_case ( __UpperCAmelCase : int): return (index & (index + 1)) - 1 def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int): a : Union[str, Any] = value while index < self.size: a : Dict = self.get_prev(__UpperCAmelCase) + 1 if current_left_border == index: a : Optional[int] = value else: a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = self.get_next(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): right -= 1 # Because of right is exclusive a : List[str] = 0 while left <= right: a : Dict = self.get_prev(__UpperCAmelCase) if left <= current_left: a : Optional[int] = max(__UpperCAmelCase , self.tree[right]) a : Optional[Any] = current_left else: a : List[str] = max(__UpperCAmelCase , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
40
1
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device __lowercase = False class _A ( unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Dict): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Any): a : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg") a : Optional[Any] = torch.manual_seed(0) a : Union[str, Any] = pipe.dual_guided( prompt="first prompt" , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__UpperCAmelCase) a : Optional[Any] = VersatileDiffusionPipeline.from_pretrained(__UpperCAmelCase , torch_dtype=torch.floataa) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : int = generator.manual_seed(0) a : Any = pipe.dual_guided( prompt="first prompt" , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass" def __snake_case ( self : List[str]): a : str = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Any = "cyberpunk 2077" a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg") a : List[str] = torch.manual_seed(0) a : Optional[int] = pipe.dual_guided( prompt=__UpperCAmelCase , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images a : Any = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) a : str = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 a : Union[str, Any] = "A painting of a squirrel eating a burger " a : Any = torch.manual_seed(0) a : Tuple = pipe.text_to_image( prompt=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy").images a : List[Any] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) a : str = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 a : Dict = pipe.image_variation(__UpperCAmelCase , generator=__UpperCAmelCase , output_type="numpy").images a : Dict = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) a : Any = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456]) assert np.abs(image_slice.flatten() - 
expected_slice).max() < 1e-1
40
"""simple docstring""" import unittest from knapsack import knapsack as k class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[Any]): a : str = 0 a : Optional[int] = [0] a : Union[str, Any] = [0] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) a : List[str] = [60] a : str = [10] a : Optional[int] = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) def __snake_case ( self : Optional[int]): a : Any = 3 a : str = [1, 2, 3] a : Tuple = [3, 2, 1] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5) def __snake_case ( self : Tuple): a : int = 50 a : List[Any] = [60, 100, 120] a : Optional[int] = [10, 20, 30] a : str = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220) if __name__ == "__main__": unittest.main()
40
1
"""simple docstring""" __lowercase = { "joule": 1.0, "kilojoule": 1000, "megajoule": 1000000, "gigajoule": 1000000000, "wattsecond": 1.0, "watthour": 3600, "kilowatthour": 3600000, "newtonmeter": 1.0, "calorie_nutr": 4186.8, "kilocalorie_nutr": 4186800.00, "electronvolt": 1.602176634e-19, "britishthermalunit_it": 1055.05585, "footpound": 1.35_58_18, } def lowercase ( A_ , A_ , A_ )-> float: '''simple docstring''' if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: a : Optional[int] = ( F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n''' F'''Valid values are: {", ".join(A_ )}''' ) raise ValueError(A_ ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
40
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = LayoutLMTokenizer UpperCAmelCase : int = LayoutLMTokenizerFast UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : Optional[Any] = True def __snake_case ( self : Optional[int]): super().setUp() a : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str): a : Tuple = "UNwant\u00E9d,running" a : Dict = "unwanted, running" return input_text, output_text def __snake_case ( self : Any): a : List[Any] = self.tokenizer_class(self.vocab_file) a : str = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9]) def __snake_case ( self : Dict): pass
40
1
"""simple docstring""" import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def lowercase ( A_ )-> Optional[int]: '''simple docstring''' a : str = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(A_ , A_ ) def lowercase ( A_ )-> Optional[Any]: '''simple docstring''' a : Tuple = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: a : Union[str, Any] = s_dict.pop(A_ ) elif "subsample" in key: a : Optional[Any] = s_dict.pop(A_ ) def lowercase ( A_ )-> Optional[int]: '''simple docstring''' a , a : int = emb.weight.shape a : List[str] = nn.Linear(A_ , A_ , bias=A_ ) a : List[Any] = emb.weight.data return lin_layer def lowercase ( A_ , A_ )-> Dict: '''simple docstring''' a : Optional[Any] = torch.load(A_ , map_location="cpu" ) a : Union[str, Any] = mam_aaa["args"] a : Tuple = mam_aaa["model"] a : Tuple = state_dict["decoder.output_projection.weight"] remove_ignore_keys_(A_ ) rename_keys(A_ ) a : str = state_dict["decoder.embed_tokens.weight"].shape[0] a : List[Any] = args.share_decoder_input_output_embed a : str = [int(A_ ) for i in args.conv_kernel_sizes.split("," )] a : Optional[int] = SpeechaTextConfig( vocab_size=A_ , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(A_ ) , conv_channels=args.conv_channels , conv_kernel_sizes=A_ , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=A_ , num_beams=5 , max_length=200 , use_cache=A_ , decoder_start_token_id=2 , early_stopping=A_ , ) a : Dict = SpeechaTextForConditionalGeneration(A_ ) a , a : List[Any] = model.model.load_state_dict(A_ , strict=A_ ) if len(A_ ) > 0 and not set(A_ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," F''' but all the following weights are missing {missing}''' ) if tie_embeds: a : Union[str, Any] = make_linear_from_emb(model.model.decoder.embed_tokens ) else: a : Union[str, Any] = lm_head_weights model.save_pretrained(A_ ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""") parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") __lowercase = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
40
"""simple docstring""" def lowercase ( A_ )-> str: '''simple docstring''' if isinstance(A_ , A_ ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(A_ , A_ ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" a : Optional[Any] = False if num < 0: a : Tuple = True a : str = -num a : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(A_ ) for e in binary ) return "0b" + "".join(str(A_ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
40
1
"""simple docstring""" import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class _A : """simple docstring""" def __init__( self : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple=99 , __UpperCAmelCase : List[str]=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : Dict=9 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Any=False , __UpperCAmelCase : List[Any]=32 , __UpperCAmelCase : Any=5 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : List[str]=37 , __UpperCAmelCase : Dict=8 , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : Optional[int]=0.002 , __UpperCAmelCase : int=1 , __UpperCAmelCase : Union[str, Any]=0 , __UpperCAmelCase : Union[str, Any]=0 , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : Optional[int]=None , ): a : Any = parent a : int = batch_size a : List[Any] = encoder_seq_length a : str = decoder_seq_length # For common tests a : int = self.decoder_seq_length a : int = is_training a : List[Any] = use_attention_mask a : Union[str, Any] = use_labels a : Union[str, Any] = vocab_size a : Any = hidden_size a : int = num_hidden_layers a : Optional[Any] = num_attention_heads a : Any = d_ff a : str = relative_attention_num_buckets a : Tuple = dropout_rate a : Tuple = initializer_factor a : Optional[int] = eos_token_id a : Tuple = pad_token_id a : List[Any] = decoder_start_token_id a : Optional[Any] = None a : List[Any] = decoder_layers def __snake_case ( self : int): return TaConfig.from_pretrained("google/umt5-base") def __snake_case ( self : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : List[str]=None , ): if attention_mask is None: a : Tuple = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: a : Tuple = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: a : List[Any] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCAmelCase) if decoder_head_mask is None: a : List[Any] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase) if cross_attn_head_mask is None: a : Dict = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __snake_case ( self : List[str]): a : Any = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size) a : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 
2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input a : Any = input_ids.clamp(self.pad_token_id + 1) a : Tuple = decoder_input_ids.clamp(self.pad_token_id + 1) a : Any = self.get_config() a : int = config.num_attention_heads a : List[Any] = self.prepare_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) return config, input_dict def __snake_case ( self : Any): a , a : Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def __snake_case ( self : List[str]): return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __snake_case ( self : List[Any]): return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __snake_case ( self : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , ): a : int = UMTaModel(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a : Optional[int] = model( input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , ) a : int = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase) a : Any = result.last_hidden_state a : Optional[int] = result.past_key_values a : str = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(__UpperCAmelCase) , config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]) , 4) def __snake_case ( self : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , ): a : List[Any] = UMTaModel(config=__UpperCAmelCase).get_decoder().to(__UpperCAmelCase).eval() # first forward pass a : Optional[Any] = model(__UpperCAmelCase , use_cache=__UpperCAmelCase) a : Union[str, Any] = model(__UpperCAmelCase) a : List[str] = 
model(__UpperCAmelCase , use_cache=__UpperCAmelCase) self.parent.assertTrue(len(__UpperCAmelCase) == len(__UpperCAmelCase)) self.parent.assertTrue(len(__UpperCAmelCase) == len(__UpperCAmelCase) + 1) a , a : Any = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids a : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size) # append to next input_ids and a : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1) a : Dict = model(__UpperCAmelCase)["last_hidden_state"] a : Any = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase)["last_hidden_state"] # select random slice a : int = ids_tensor((1,) , output_from_past.shape[-1]).item() a : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach() a : Dict = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3)) def __snake_case ( self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , ): a : str = UMTaModel(config=__UpperCAmelCase).to(__UpperCAmelCase).half().eval() a : Any = model(**__UpperCAmelCase)["last_hidden_state"] self.parent.assertFalse(torch.isnan(__UpperCAmelCase).any().item()) @require_torch class _A ( _a ,_a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : int = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) UpperCAmelCase : Any = (UMTaForConditionalGeneration,) if is_torch_available() else () UpperCAmelCase : Union[str, Any] = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) UpperCAmelCase : List[str] = True UpperCAmelCase : List[Any] = False UpperCAmelCase : str = False UpperCAmelCase : Dict = True UpperCAmelCase : Dict = True # The small UMT5 model needs higher percentages for CPU/MP tests UpperCAmelCase : List[Any] = [0.8, 0.9] def __snake_case ( self : List[str]): a : str = UMTaModelTester(self) @unittest.skip("Test has a segmentation fault on torch 1.8.0") def __snake_case ( self : Tuple): a : Union[str, Any] = self.model_tester.prepare_config_and_inputs() a : List[Any] = UMTaModel(config_and_inputs[0]).to(__UpperCAmelCase) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( __UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=__UpperCAmelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , ) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision") def __snake_case ( self : Union[str, Any]): a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*__UpperCAmelCase) def __snake_case ( self : List[str]): a : Dict = ["encoder_attentions", "decoder_attentions", "cross_attentions"] a : int = self.model_tester.prepare_config_and_inputs() a : List[str] = config_and_inputs[0] a : Optional[Any] = UMTaForConditionalGeneration(__UpperCAmelCase).eval() model.to(__UpperCAmelCase) a : Optional[Any] = { "head_mask": torch.zeros(config.num_layers , config.num_heads , device=__UpperCAmelCase), "decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , 
device=__UpperCAmelCase), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase), } for attn_name, (name, mask) in zip(__UpperCAmelCase , head_masking.items()): a : Any = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": a : Union[str, Any] = torch.ones( config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase) a : Optional[int] = model.generate( config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , **__UpperCAmelCase , ) # We check the state of decoder_attentions and cross_attentions just from the last step a : Tuple = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]) , 0.0) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") def __snake_case ( self : List[Any]): pass @require_torch @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): """simple docstring""" @slow @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged") def __snake_case ( self : Any): a : Optional[Any] = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=__UpperCAmelCase).to(__UpperCAmelCase) a : Optional[Any] = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=__UpperCAmelCase , legacy=__UpperCAmelCase) a : List[Any] = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] a : Any = tokenizer(__UpperCAmelCase , return_tensors="pt" , padding=__UpperCAmelCase).input_ids # fmt: off a : str = torch.tensor( [ [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1], ]) # fmt: on torch.testing.assert_allclose(__UpperCAmelCase , __UpperCAmelCase) a : int = model.generate(input_ids.to(__UpperCAmelCase)) a : List[str] = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. 
We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] a : str = tokenizer.batch_decode(__UpperCAmelCase) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]: '''simple docstring''' a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ ) a , a : int = [i[0] for i in r], [i[1] for i in r] a : Union[str, Any] = list(accumulate(A_ ) ) a : Optional[Any] = bisect(A_ , A_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def lowercase ( )-> List[Any]: '''simple docstring''' a : Any = argparse.ArgumentParser() parser.add_argument("--model_ckpt" , type=A_ , default="microsoft/unixcoder-base-nine" ) parser.add_argument("--num_epochs" , type=A_ , default=5 ) parser.add_argument("--batch_size" , type=A_ , default=6 ) parser.add_argument("--gradient_accumulation_steps" , type=A_ , default=1 ) parser.add_argument("--freeze" , type=A_ , default=A_ ) parser.add_argument("--learning_rate" , type=A_ , default=5e-4 ) parser.add_argument("--seed" , type=A_ , default=0 ) parser.add_argument("--lr_scheduler_type" , type=A_ , default="cosine" ) parser.add_argument("--num_warmup_steps" , type=A_ , default=10 ) parser.add_argument("--weight_decay" , type=A_ , default=0.0_1 ) parser.add_argument("--output_dir" , type=A_ , default="./results" ) return parser.parse_args() __lowercase = load("""accuracy""") def lowercase ( A_ )-> Tuple: '''simple docstring''' a , a : List[str] = eval_pred a : str = np.argmax(A_ , axis=1 ) return metric.compute(predictions=A_ , references=A_ ) class _A ( _a ): """simple docstring""" def __init__( self : Optional[Any] , __UpperCAmelCase : Union[str, Any]): super().__init__() a : int = trainer def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , **__UpperCAmelCase : List[Any]): if control.should_evaluate: a : Union[str, Any] = deepcopy(__UpperCAmelCase) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train") return control_copy def lowercase ( )-> Union[str, Any]: '''simple docstring''' a : List[str] = get_args() set_seed(args.seed ) a : int = load_dataset("codeparrot/codecomplex" , split="train" ) a : Dict = dataset.train_test_split(test_size=0.2 ) a : int = train_test["test"].train_test_split(test_size=0.5 ) a : Union[str, Any] = DatasetDict( { "train": train_test["train"], "test": test_validation["train"], "valid": test_validation["test"], } ) print("Loading tokenizer and model" ) a : Dict = AutoTokenizer.from_pretrained(args.model_ckpt ) a : Optional[int] = tokenizer.eos_token a : Any = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 ) a : Any = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): a : Optional[Any] = False a : Union[str, Any] = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) ) def tokenize(A_ ): a : List[str] = tokenizer(example["src"] , truncation=A_ , max_length=1_024 ) a : Optional[int] = labels.straint(example["complexity"] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } a : Optional[Any] = train_test_validation.map( A_ , batched=A_ , remove_columns=train_test_validation["train"].column_names , ) a : Dict = DataCollatorWithPadding(tokenizer=A_ ) a : Optional[Any] = TrainingArguments( output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , 
num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.0_1 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , ) a : Union[str, Any] = Trainer( model=A_ , args=A_ , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=A_ , data_collator=A_ , compute_metrics=A_ , ) print("Training..." ) trainer.add_callback(CustomCallback(A_ ) ) trainer.train() if __name__ == "__main__": main()
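# Added sketch (not from the original script) of the `ClassLabel` round-trip
# that `tokenize` above relies on: `str2int` maps a label name to its integer
# id and `int2str` inverts it. The label names below are made up for
# illustration only.
if __name__ == "__main__":
    from datasets import ClassLabel

    complexity_labels = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])
    assert complexity_labels.str2int("linear") == 1
    assert complexity_labels.int2str(1) == "linear"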
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowercase ( A_ , A_ , A_ = False )-> list[float]: '''simple docstring''' if radian_mode: return [magnitude * cos(A_ ), magnitude * sin(A_ )] return [magnitude * cos(radians(A_ ) ), magnitude * sin(radians(A_ ) )] def lowercase ( A_ , A_ , A_ = 10**-1 )-> bool: '''simple docstring''' a : NDArray[floataa] = cross(A_ , A_ ) a : float = sum(A_ ) return abs(A_ ) < eps if __name__ == "__main__": # Test to check if it works __lowercase = array( [ polar_force(7_18.4, 180 - 30), polar_force(8_79.54, 45), polar_force(100, -90), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __lowercase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str=2 , __UpperCAmelCase : Any=True , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Optional[int]=10 , __UpperCAmelCase : List[str]=3 , __UpperCAmelCase : Union[str, Any]=32 * 8 , __UpperCAmelCase : Tuple=32 * 8 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : Dict=64 , ): a : Union[str, Any] = parent a : Dict = batch_size a : List[str] = is_training a : str = use_auxiliary_loss a : Union[str, Any] = num_queries a : List[Any] = num_channels a : Union[str, Any] = min_size a : Dict = max_size a : str = num_labels a : Optional[Any] = hidden_dim a : str = hidden_dim def __snake_case ( self : str): a : Any = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( __UpperCAmelCase) a : str = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__UpperCAmelCase) a : List[Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__UpperCAmelCase) > 0.5 ).float() a : str = (torch.rand((self.batch_size, self.num_labels) , device=__UpperCAmelCase) > 0.5).long() a : str = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __snake_case ( self : Union[str, Any]): a : Any = MaskaFormerConfig( hidden_size=self.hidden_dim , ) a : Optional[Any] = self.num_queries a : Union[str, Any] = self.num_labels a : List[Any] = [1, 1, 1, 1] a : Union[str, Any] = self.num_channels a : Union[str, Any] = 64 a : List[str] = 128 a : Dict = self.hidden_dim a : Any = self.hidden_dim a : Optional[int] = self.hidden_dim return config def __snake_case ( self : str): a , a , a , a , a : Optional[int] = self.prepare_config_and_inputs() a : Optional[int] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def __snake_case ( self : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int]): a : Union[str, Any] = output.encoder_hidden_states a : int = output.pixel_decoder_hidden_states a : Optional[int] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__UpperCAmelCase) , len(config.backbone_config.depths)) self.parent.assertTrue(len(__UpperCAmelCase) , len(config.backbone_config.depths)) self.parent.assertTrue(len(__UpperCAmelCase) , config.decoder_layers) def __snake_case ( self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : str=False): with torch.no_grad(): a : Union[str, Any] = MaskaFormerModel(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a : Union[str, Any] = model(pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase) a : Dict = model(__UpperCAmelCase , 
output_hidden_states=__UpperCAmelCase) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str): a : str = MaskaFormerForUniversalSegmentation(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() def comm_check_on_output(__UpperCAmelCase : List[Any]): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1)) with torch.no_grad(): a : List[Any] = model(pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase) a : List[Any] = model(__UpperCAmelCase) comm_check_on_output(__UpperCAmelCase) a : Optional[int] = model( pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase) comm_check_on_output(__UpperCAmelCase) self.parent.assertTrue(result.loss is not None) self.parent.assertEqual(result.loss.shape , torch.Size([1])) @require_torch class _A ( _a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Any = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () UpperCAmelCase : Optional[int] = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {} UpperCAmelCase : Any = False UpperCAmelCase : Any = False UpperCAmelCase : int = False UpperCAmelCase : List[Any] = False def __snake_case ( self : str): a : Tuple = MaskaFormerModelTester(self) a : Union[str, Any] = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase) def __snake_case ( self : List[str]): self.config_tester.run_common_tests() def __snake_case ( self : List[Any]): a , a : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(__UpperCAmelCase , **__UpperCAmelCase , output_hidden_states=__UpperCAmelCase) def __snake_case ( self : Optional[Any]): a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__UpperCAmelCase) @unittest.skip(reason="Mask2Former does not use inputs_embeds") def __snake_case ( self : Any): pass @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method") def __snake_case ( self : Any): pass @unittest.skip(reason="Mask2Former is not a generative model") def __snake_case ( self : List[str]): pass @unittest.skip(reason="Mask2Former does not use token embeddings") def __snake_case ( self : Dict): pass @require_torch_multi_gpu 
@unittest.skip( reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`") def __snake_case ( self : str): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.") def __snake_case ( self : List[Any]): pass def __snake_case ( self : List[str]): a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a : Tuple = model_class(__UpperCAmelCase) a : List[str] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic a : Tuple = [*signature.parameters.keys()] a : Union[str, Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase) @slow def __snake_case ( self : List[Any]): for model_name in ["facebook/mask2former-swin-small-coco-instance"]: a : Any = MaskaFormerModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) def __snake_case ( self : Optional[int]): a : int = (self.model_tester.min_size,) * 2 a : Dict = { "pixel_values": torch.randn((2, 3, *size) , device=__UpperCAmelCase), "mask_labels": torch.randn((2, 10, *size) , device=__UpperCAmelCase), "class_labels": torch.zeros(2 , 10 , device=__UpperCAmelCase).long(), } a : Any = self.model_tester.get_config() a : Any = MaskaFormerForUniversalSegmentation(__UpperCAmelCase).to(__UpperCAmelCase) a : List[Any] = model(**__UpperCAmelCase) self.assertTrue(outputs.loss is not None) def __snake_case ( self : Union[str, Any]): a , a : int = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(__UpperCAmelCase , **__UpperCAmelCase , output_hidden_states=__UpperCAmelCase) def __snake_case ( self : Optional[Any]): a , a : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a : int = model_class(__UpperCAmelCase).to(__UpperCAmelCase) a : List[str] = model(**__UpperCAmelCase , output_attentions=__UpperCAmelCase) self.assertTrue(outputs.attentions is not None) def __snake_case ( self : Tuple): if not self.model_tester.is_training: return a : Tuple = self.all_model_classes[1] a , a , a , a , a : List[str] = self.model_tester.prepare_config_and_inputs() a : Dict = model_class(__UpperCAmelCase) model.to(__UpperCAmelCase) model.train() a : Optional[int] = model(__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase).loss loss.backward() def __snake_case ( self : str): a : str = self.all_model_classes[1] a , a , a , a , a : Union[str, Any] = self.model_tester.prepare_config_and_inputs() a : Dict = True a : str = True a : Tuple = model_class(__UpperCAmelCase).to(__UpperCAmelCase) model.train() a : Union[str, Any] = model(__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase) a : List[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() a : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() a : int = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() a : List[Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__UpperCAmelCase) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) __lowercase = 1e-4 def lowercase ( )-> Union[str, Any]: '''simple docstring''' a : int = 
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class _A ( unittest.TestCase ): """simple docstring""" @cached_property def __snake_case ( self : Any): return "facebook/mask2former-swin-small-coco-instance" @cached_property def __snake_case ( self : Dict): return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def __snake_case ( self : Union[str, Any]): a : str = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(__UpperCAmelCase) a : List[str] = self.default_image_processor a : int = prepare_img() a : Union[str, Any] = image_processor(__UpperCAmelCase , return_tensors="pt").to(__UpperCAmelCase) a : Optional[int] = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(__UpperCAmelCase , (1, 3, 384, 384)) with torch.no_grad(): a : int = model(**__UpperCAmelCase) a : Tuple = torch.tensor( [[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]]).to(__UpperCAmelCase) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase)) a : Union[str, Any] = torch.tensor( [[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]]).to(__UpperCAmelCase) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase)) a : Dict = torch.tensor( [[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]]).to(__UpperCAmelCase) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase)) def __snake_case ( self : Optional[Any]): a : int = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(__UpperCAmelCase).eval() a : List[str] = self.default_image_processor a : Optional[Any] = prepare_img() a : Dict = image_processor(__UpperCAmelCase , return_tensors="pt").to(__UpperCAmelCase) a : List[Any] = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(__UpperCAmelCase , (1, 3, 384, 384)) with torch.no_grad(): a : Optional[int] = model(**__UpperCAmelCase) # masks_queries_logits a : Optional[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)) a : List[Any] = [ [-8.7_839, -9.0_056, -8.8_121], [-7.4_104, -7.0_313, -6.5_401], [-6.6_105, -6.3_427, -6.4_675], ] a : int = torch.tensor(__UpperCAmelCase).to(__UpperCAmelCase) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase)) # class_queries_logits a : int = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1)) a : Optional[Any] = torch.tensor( [ [1.8_324, -8.0_835, -4.1_922], [0.8_450, -9.0_050, -3.6_053], [0.3_045, -7.7_293, -3.0_275], ]).to(__UpperCAmelCase) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase)) def __snake_case ( self : List[Any]): a : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(__UpperCAmelCase).eval() a : int = self.default_image_processor a 
: Dict = image_processor( [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors="pt" , ) a : Dict = inputs["pixel_values"].to(__UpperCAmelCase) a : Union[str, Any] = [el.to(__UpperCAmelCase) for el in inputs["mask_labels"]] a : Union[str, Any] = [el.to(__UpperCAmelCase) for el in inputs["class_labels"]] with torch.no_grad(): a : List[str] = model(**__UpperCAmelCase) self.assertTrue(outputs.loss is not None)
"""simple docstring""" def lowercase ( A_ , A_ )-> float: '''simple docstring''' if mass < 0: raise ValueError("The mass of a body cannot be negative" ) return 0.5 * mass * abs(A_ ) * abs(A_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def lowercase ( A_ )-> Any: '''simple docstring''' a : int = SwinvaConfig() a : Any = swinva_name.split("_" ) a : Optional[int] = name_split[1] if "to" in name_split[3]: a : Optional[int] = int(name_split[3][-3:] ) else: a : List[str] = int(name_split[3] ) if "to" in name_split[2]: a : Optional[int] = int(name_split[2][-2:] ) else: a : int = int(name_split[2][6:] ) if model_size == "tiny": a : Optional[int] = 96 a : Any = (2, 2, 6, 2) a : Dict = (3, 6, 12, 24) elif model_size == "small": a : List[str] = 96 a : Optional[int] = (2, 2, 18, 2) a : Dict = (3, 6, 12, 24) elif model_size == "base": a : Optional[Any] = 128 a : Dict = (2, 2, 18, 2) a : Any = (4, 8, 16, 32) else: a : List[Any] = 192 a : Any = (2, 2, 18, 2) a : Any = (6, 12, 24, 48) if "to" in swinva_name: a : int = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): a : Tuple = 21_841 a : Union[str, Any] = "huggingface/label-files" a : List[Any] = "imagenet-22k-id2label.json" a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) ) a : List[Any] = {int(A_ ): v for k, v in idalabel.items()} a : Union[str, Any] = idalabel a : Tuple = {v: k for k, v in idalabel.items()} else: a : Union[str, Any] = 1_000 a : Union[str, Any] = "huggingface/label-files" a : int = "imagenet-1k-id2label.json" a : Optional[Any] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) ) a : Any = {int(A_ ): v for k, v in idalabel.items()} a : int = idalabel a : Optional[Any] = {v: k for k, v in idalabel.items()} a : str = img_size a : Union[str, Any] = num_classes a : Union[str, Any] = embed_dim a : Optional[Any] = depths a : Optional[int] = num_heads a : int = window_size return config def lowercase ( A_ )-> Union[str, Any]: '''simple docstring''' if "patch_embed.proj" in name: a : Optional[int] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: a : int = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: a : Optional[Any] = "encoder." + name if "attn.proj" in name: a : Union[str, Any] = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: a : List[Any] = name.replace("attn" , "attention.self" ) if "norm1" in name: a : List[str] = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a : int = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a : int = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" ) if "q_bias" in name: a : Dict = name.replace("q_bias" , "query.bias" ) if "k_bias" in name: a : Any = name.replace("k_bias" , "key.bias" ) if "v_bias" in name: a : List[str] = name.replace("v_bias" , "value.bias" ) if "cpb_mlp" in name: a : Tuple = name.replace("cpb_mlp" , "continuous_position_bias_mlp" ) if name == "norm.weight": a : int = "layernorm.weight" if name == "norm.bias": a : int = "layernorm.bias" if "head" in name: a : List[Any] = name.replace("head" , "classifier" ) else: a : List[Any] = "swinv2." 
+ name return name def lowercase ( A_ , A_ )-> Union[str, Any]: '''simple docstring''' for key in orig_state_dict.copy().keys(): a : List[Any] = orig_state_dict.pop(A_ ) if "mask" in key: continue elif "qkv" in key: a : List[str] = key.split("." ) a : Optional[int] = int(key_split[1] ) a : Union[str, Any] = int(key_split[3] ) a : Tuple = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: a : int = val[:dim, :] a : Union[str, Any] = val[dim : dim * 2, :] a : List[str] = val[-dim:, :] else: a : str = val[:dim] a : str = val[ dim : dim * 2 ] a : str = val[-dim:] else: a : List[Any] = val return orig_state_dict def lowercase ( A_ , A_ )-> Optional[int]: '''simple docstring''' a : Union[str, Any] = timm.create_model(A_ , pretrained=A_ ) timm_model.eval() a : str = get_swinva_config(A_ ) a : Dict = SwinvaForImageClassification(A_ ) model.eval() a : Tuple = convert_state_dict(timm_model.state_dict() , A_ ) model.load_state_dict(A_ ) a : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg" a : Optional[Any] = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) ) a : Optional[int] = Image.open(requests.get(A_ , stream=A_ ).raw ) a : int = image_processor(images=A_ , return_tensors="pt" ) a : Tuple = timm_model(inputs["pixel_values"] ) a : Optional[Any] = model(**A_ ).logits assert torch.allclose(A_ , A_ , atol=1e-3 ) print(F'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A_ ) model.push_to_hub( repo_path_or_name=Path(A_ , A_ ) , organization="nandwalritik" , commit_message="Add model" , ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swinv2_name""", default="""swinv2_tiny_patch4_window8_256""", type=str, help="""Name of the Swinv2 timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) __lowercase = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
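# Added toy illustration (not part of the conversion script) of the qkv split
# performed in `convert_state_dict`: timm stores query, key and value as one
# fused (3 * dim, dim) matrix, which is sliced into three (dim, dim) blocks
# along the first axis.
if __name__ == "__main__":
    import torch

    dim = 4
    fused_qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query = fused_qkv[:dim, :]
    key = fused_qkv[dim : dim * 2, :]
    value = fused_qkv[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)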
"""simple docstring""" import os import sys import unittest __lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowercase = os.path.join(git_repo_path, """src""", """diffusers""") class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = find_backend(" if not is_torch_available():") self.assertEqual(__UpperCAmelCase , "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") a : int = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx") def __snake_case ( self : Union[str, Any]): a : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , __UpperCAmelCase) self.assertIn("torch_and_transformers" , __UpperCAmelCase) self.assertIn("flax_and_transformers" , __UpperCAmelCase) self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"]) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"]) self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"]) def __snake_case ( self : Tuple): a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'") self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n") a : Dict = create_dummy_object("function" , "'torch'") self.assertEqual( __UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n") a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" a : int = create_dummy_object("FakeClass" , "'torch'") self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[str]): a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n 
requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
"""simple docstring""" def lowercase ( A_ , A_ )-> float: '''simple docstring''' _validate_point(A_ ) _validate_point(A_ ) if len(A_ ) != len(A_ ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(a - b ) for a, b in zip(A_ , A_ ) ) ) def lowercase ( A_ )-> None: '''simple docstring''' if point: if isinstance(A_ , A_ ): for item in point: if not isinstance(A_ , (int, float) ): a : Union[str, Any] = ( "Expected a list of numbers as input, found " F'''{type(A_ ).__name__}''' ) raise TypeError(A_ ) else: a : Optional[int] = F'''Expected a list of numbers as input, found {type(A_ ).__name__}''' raise TypeError(A_ ) else: raise ValueError("Missing an input" ) def lowercase ( A_ , A_ )-> float: '''simple docstring''' _validate_point(A_ ) _validate_point(A_ ) if len(A_ ) != len(A_ ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(x - y ) for x, y in zip(A_ , A_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" __lowercase = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": """pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[Any]): a : str = { "task_specific_params": { "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4}, "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4}, "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6}, } } a : List[str] = { "task_specific_params.summarization.length_penalty": 1.0, "task_specific_params.summarization.max_length": 128, "task_specific_params.summarization.min_length": 12, "task_specific_params.summarization.num_beams": 4, "task_specific_params.summarization_cnn.length_penalty": 2.0, "task_specific_params.summarization_cnn.max_length": 142, "task_specific_params.summarization_cnn.min_length": 56, "task_specific_params.summarization_cnn.num_beams": 4, "task_specific_params.summarization_xsum.length_penalty": 1.0, "task_specific_params.summarization_xsum.max_length": 62, "task_specific_params.summarization_xsum.min_length": 11, "task_specific_params.summarization_xsum.num_beams": 6, } self.assertEqual(flatten_dict(__UpperCAmelCase) , __UpperCAmelCase) def __snake_case ( self : List[Any]): a : Dict = np.random.randn(3 , 4) self.assertTrue(np.allclose(transpose(__UpperCAmelCase) , x.transpose())) a : str = np.random.randn(3 , 4 , 5) self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0)) , x.transpose((1, 2, 0)))) @require_torch def __snake_case ( self : Tuple): a : Union[str, Any] = np.random.randn(3 , 4) a : Union[str, Any] = torch.tensor(__UpperCAmelCase) self.assertTrue(np.allclose(transpose(__UpperCAmelCase) , transpose(__UpperCAmelCase).numpy())) a : Tuple = np.random.randn(3 , 4 , 5) a : Optional[int] = torch.tensor(__UpperCAmelCase) self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0)) , transpose(__UpperCAmelCase , axes=(1, 2, 0)).numpy())) @require_tf def __snake_case ( self : List[str]): a : int = np.random.randn(3 , 4) a : Optional[int] = tf.constant(__UpperCAmelCase) self.assertTrue(np.allclose(transpose(__UpperCAmelCase) , transpose(__UpperCAmelCase).numpy())) a : Optional[Any] = np.random.randn(3 , 4 , 5) a : List[str] = tf.constant(__UpperCAmelCase) self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0)) , transpose(__UpperCAmelCase , axes=(1, 2, 0)).numpy())) @require_flax def __snake_case ( self : str): a : Union[str, Any] = np.random.randn(3 , 4) a : Dict = jnp.array(__UpperCAmelCase) self.assertTrue(np.allclose(transpose(__UpperCAmelCase) , np.asarray(transpose(__UpperCAmelCase)))) a : str = np.random.randn(3 , 4 , 5) a : List[str] = jnp.array(__UpperCAmelCase) self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0)) , np.asarray(transpose(__UpperCAmelCase , axes=(1, 2, 0))))) def __snake_case ( self : Optional[int]): a : Union[str, Any] = np.random.randn(3 , 4) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3)) , np.reshape(__UpperCAmelCase , (4, 3)))) a : Dict = np.random.randn(3 , 4 , 5) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5)) , 
np.reshape(__UpperCAmelCase , (12, 5)))) @require_torch def __snake_case ( self : Tuple): a : List[Any] = np.random.randn(3 , 4) a : Tuple = torch.tensor(__UpperCAmelCase) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3)) , reshape(__UpperCAmelCase , (4, 3)).numpy())) a : List[str] = np.random.randn(3 , 4 , 5) a : Union[str, Any] = torch.tensor(__UpperCAmelCase) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5)) , reshape(__UpperCAmelCase , (12, 5)).numpy())) @require_tf def __snake_case ( self : Optional[int]): a : List[Any] = np.random.randn(3 , 4) a : Dict = tf.constant(__UpperCAmelCase) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3)) , reshape(__UpperCAmelCase , (4, 3)).numpy())) a : Union[str, Any] = np.random.randn(3 , 4 , 5) a : Union[str, Any] = tf.constant(__UpperCAmelCase) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5)) , reshape(__UpperCAmelCase , (12, 5)).numpy())) @require_flax def __snake_case ( self : Union[str, Any]): a : Optional[Any] = np.random.randn(3 , 4) a : Any = jnp.array(__UpperCAmelCase) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3)) , np.asarray(reshape(__UpperCAmelCase , (4, 3))))) a : List[str] = np.random.randn(3 , 4 , 5) a : Union[str, Any] = jnp.array(__UpperCAmelCase) self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5)) , np.asarray(reshape(__UpperCAmelCase , (12, 5))))) def __snake_case ( self : Optional[int]): a : Tuple = np.random.randn(1 , 3 , 4) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase) , np.squeeze(__UpperCAmelCase))) a : List[str] = np.random.randn(1 , 4 , 1 , 5) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2) , np.squeeze(__UpperCAmelCase , axis=2))) @require_torch def __snake_case ( self : Tuple): a : Dict = np.random.randn(1 , 3 , 4) a : Any = torch.tensor(__UpperCAmelCase) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase) , squeeze(__UpperCAmelCase).numpy())) a : Any = np.random.randn(1 , 4 , 1 , 5) a : str = torch.tensor(__UpperCAmelCase) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2) , squeeze(__UpperCAmelCase , axis=2).numpy())) @require_tf def __snake_case ( self : Tuple): a : int = np.random.randn(1 , 3 , 4) a : List[str] = tf.constant(__UpperCAmelCase) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase) , squeeze(__UpperCAmelCase).numpy())) a : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5) a : int = tf.constant(__UpperCAmelCase) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2) , squeeze(__UpperCAmelCase , axis=2).numpy())) @require_flax def __snake_case ( self : Dict): a : Tuple = np.random.randn(1 , 3 , 4) a : List[Any] = jnp.array(__UpperCAmelCase) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase) , np.asarray(squeeze(__UpperCAmelCase)))) a : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5) a : Tuple = jnp.array(__UpperCAmelCase) self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2) , np.asarray(squeeze(__UpperCAmelCase , axis=2)))) def __snake_case ( self : List[Any]): a : Optional[int] = np.random.randn(3 , 4) self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1) , np.expand_dims(__UpperCAmelCase , axis=1))) @require_torch def __snake_case ( self : Optional[Any]): a : Any = np.random.randn(3 , 4) a : Optional[Any] = torch.tensor(__UpperCAmelCase) self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1) , expand_dims(__UpperCAmelCase , axis=1).numpy())) @require_tf def __snake_case ( self : str): a : int = np.random.randn(3 , 4) a : int = 
tf.constant(__UpperCAmelCase) self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1) , expand_dims(__UpperCAmelCase , axis=1).numpy())) @require_flax def __snake_case ( self : List[Any]): a : Optional[int] = np.random.randn(3 , 4) a : Any = jnp.array(__UpperCAmelCase) self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1) , np.asarray(expand_dims(__UpperCAmelCase , axis=1))))
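# Added sketch (not part of the test file above): the generic tensor ops it
# exercises dispatch on the input type, so one call site can serve NumPy
# arrays and framework tensors alike.
if __name__ == "__main__":
    import numpy as np

    from transformers.utils import transpose

    print(transpose(np.zeros((2, 3))).shape)  # (3, 2)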
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, 
TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
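# Added sketch (not part of the __init__ module above): with `_LazyModule`,
# importing one symbol from the package loads only the submodule that defines
# it, which keeps the top-level `import transformers` cheap.
if __name__ == "__main__":
    from transformers.models.bert import BertConfig  # resolves configuration_bert lazily

    print(BertConfig().hidden_size)  # 768 by default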
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __lowercase = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""") @require_sentencepiece @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = SpeechTaTokenizer UpperCAmelCase : List[Any] = False UpperCAmelCase : Any = True def __snake_case ( self : Optional[Any]): super().setUp() # We have a SentencePiece fixture for testing a : Optional[int] = SpeechTaTokenizer(__UpperCAmelCase) a : List[str] = AddedToken("<mask>" , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) a : List[Any] = mask_token tokenizer.add_special_tokens({"mask_token": mask_token}) tokenizer.add_tokens(["<ctc_blank>"]) tokenizer.save_pretrained(self.tmpdirname) def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str): a : Dict = "this is a test" a : Tuple = "this is a test" return input_text, output_text def __snake_case ( self : str , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : str=20 , __UpperCAmelCase : Tuple=5): a , a : List[Any] = self.get_input_output_texts(__UpperCAmelCase) a : Optional[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase) a : List[str] = tokenizer.decode(__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase) return text, ids def __snake_case ( self : Optional[Any]): a : Dict = "<pad>" a : str = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase) , __UpperCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase) , __UpperCAmelCase) def __snake_case ( self : List[Any]): a : Tuple = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , "<s>") self.assertEqual(vocab_keys[1] , "<pad>") self.assertEqual(vocab_keys[-4] , "œ") self.assertEqual(vocab_keys[-2] , "<mask>") self.assertEqual(vocab_keys[-1] , "<ctc_blank>") self.assertEqual(len(__UpperCAmelCase) , 81) def __snake_case ( self : Optional[int]): self.assertEqual(self.get_tokenizer().vocab_size , 79) def __snake_case ( self : int): a : Any = self.get_tokenizers(do_lower_case=__UpperCAmelCase) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}'''): a : int = tokenizer.vocab_size a : Optional[Any] = len(__UpperCAmelCase) self.assertNotEqual(__UpperCAmelCase , 0) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) a : List[str] = ["aaaaa bbbbbb", "cccccccccdddddddd"] a : Tuple = tokenizer.add_tokens(__UpperCAmelCase) a : Optional[int] = tokenizer.vocab_size a : Any = len(__UpperCAmelCase) self.assertNotEqual(__UpperCAmelCase , 0) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) self.assertEqual(__UpperCAmelCase , len(__UpperCAmelCase)) self.assertEqual(__UpperCAmelCase , all_size + len(__UpperCAmelCase)) a : Union[str, Any] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=__UpperCAmelCase) self.assertGreaterEqual(len(__UpperCAmelCase) , 4) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1) a : List[Any] = {"eos_token": 
">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} a : Any = tokenizer.add_special_tokens(__UpperCAmelCase) a : List[str] = tokenizer.vocab_size a : List[str] = len(__UpperCAmelCase) self.assertNotEqual(__UpperCAmelCase , 0) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) self.assertEqual(__UpperCAmelCase , len(__UpperCAmelCase)) self.assertEqual(__UpperCAmelCase , all_size_a + len(__UpperCAmelCase)) a : Union[str, Any] = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=__UpperCAmelCase) self.assertGreaterEqual(len(__UpperCAmelCase) , 6) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1) self.assertGreater(tokens[0] , tokens[1]) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1) self.assertGreater(tokens[-3] , tokens[-4]) self.assertEqual(tokens[0] , tokenizer.eos_token_id) self.assertEqual(tokens[-3] , tokenizer.pad_token_id) def __snake_case ( self : Dict): pass def __snake_case ( self : str): pass def __snake_case ( self : Union[str, Any]): a : Union[str, Any] = self.get_tokenizer() a : List[str] = tokenizer.tokenize("This is a test") # fmt: off self.assertListEqual(__UpperCAmelCase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"]) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) a : Any = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( __UpperCAmelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."]) a : Tuple = tokenizer.convert_tokens_to_ids(__UpperCAmelCase) # fmt: off self.assertListEqual(__UpperCAmelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26]) # fmt: on a : Optional[int] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase) self.assertListEqual( __UpperCAmelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."]) @slow def __snake_case ( self : Any): # Use custom sequence because this tokenizer does not handle numbers. a : Union[str, Any] = [ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained " "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] # fmt: off a : str = { "input_ids": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=__UpperCAmelCase , )
40
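The test above exercises SpeechT5's character-level SentencePiece tokenizer. For orientation, here is a minimal, self-contained sketch of the round-trip behaviour being asserted; the word-boundary marker mirrors SPIECE_UNDERLINE, but the tokenizer itself is a hand-rolled stand-in for illustration, not the real model-backed one.

# Minimal sketch of character-level tokenization with a SentencePiece-style
# word-boundary marker. Hypothetical vocabulary-free stand-in, not SpeechT5.
SPIECE_UNDERLINE = "\u2581"  # the marker SentencePiece prepends to words

def char_tokenize(text: str) -> list[str]:
    tokens = []
    for word in text.split():
        tokens.append(SPIECE_UNDERLINE)  # mark the word boundary
        tokens.extend(word)              # then emit one token per character
    return tokens

def detokenize(tokens: list[str]) -> str:
    return "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()

if __name__ == "__main__":
    toks = char_tokenize("This is a test")
    assert toks[:6] == [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE]
    assert detokenize(toks) == "This is a test"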
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , __UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
40
1
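The document-QA tool above fills a task prompt and then strips special tokens from the generated sequence. A model-free sketch of that pre/post-processing, run on a hand-written fake generation (the question and answer text are invented for the demo):

import re

# Sketch of Donut-style prompt construction and answer extraction, mirroring
# the post-processing in the tool above, but on a fake decoder output.
task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
prompt = task_prompt.replace("{user_input}", "What is the total?")

# Pretend the decoder echoed the prompt and then produced the answer + EOS.
fake_generation = prompt + " $12.34</s_answer></s>"

match = re.search(r"<s_answer>(.*?)</s_answer>", fake_generation)
answer = match.group(1).strip() if match else ""
print(answer)  # -> $12.34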
"""simple docstring""" import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def lowercase ( A_ )-> Dict: '''simple docstring''' a : Union[str, Any] = model.config a : List[str] = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , ) a : str = MBartConfig( is_decoder=A_ , is_encoder_decoder=A_ , add_cross_attention=A_ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer ) , scale_embedding=A_ , add_final_layer_norm=A_ , ) return encoder_config, decoder_config def lowercase ( A_ )-> Dict: '''simple docstring''' if "encoder.model" in name: a : List[str] = name.replace("encoder.model" , "encoder" ) if "decoder.model" in name: a : Union[str, Any] = name.replace("decoder.model" , "decoder" ) if "patch_embed.proj" in name: a : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: a : int = name.replace("patch_embed.norm" , "embeddings.norm" ) if name.startswith("encoder" ): if "layers" in name: a : List[str] = "encoder." + name if "attn.proj" in name: a : Union[str, Any] = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name and "mask" not in name: a : Tuple = name.replace("attn" , "attention.self" ) if "norm1" in name: a : Dict = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a : str = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a : List[str] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a : List[Any] = name.replace("mlp.fc2" , "output.dense" ) if name == "encoder.norm.weight": a : Tuple = "encoder.layernorm.weight" if name == "encoder.norm.bias": a : Union[str, Any] = "encoder.layernorm.bias" return name def lowercase ( A_ , A_ )-> Tuple: '''simple docstring''' for key in orig_state_dict.copy().keys(): a : Tuple = orig_state_dict.pop(A_ ) if "qkv" in key: a : Any = key.split("." 
) a : int = int(key_split[3] ) a : Optional[Any] = int(key_split[5] ) a : Dict = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: a : Any = val[:dim, :] a : Tuple = val[dim : dim * 2, :] a : Optional[Any] = val[-dim:, :] else: a : List[str] = val[:dim] a : List[str] = val[dim : dim * 2] a : Tuple = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: a : Union[str, Any] = val return orig_state_dict def lowercase ( A_ , A_=None , A_=False )-> List[str]: '''simple docstring''' a : Dict = DonutModel.from_pretrained(A_ ).eval() # load HuggingFace model a , a : Optional[int] = get_configs(A_ ) a : Union[str, Any] = DonutSwinModel(A_ ) a : Union[str, Any] = MBartForCausalLM(A_ ) a : str = VisionEncoderDecoderModel(encoder=A_ , decoder=A_ ) model.eval() a : Optional[Any] = original_model.state_dict() a : Union[str, Any] = convert_state_dict(A_ , A_ ) model.load_state_dict(A_ ) # verify results on scanned document a : Optional[Any] = load_dataset("hf-internal-testing/example-documents" ) a : Tuple = dataset["test"][0]["image"].convert("RGB" ) a : Any = XLMRobertaTokenizerFast.from_pretrained(A_ , from_slow=A_ ) a : Dict = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] ) a : Tuple = DonutProcessor(A_ , A_ ) a : Union[str, Any] = processor(A_ , return_tensors="pt" ).pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": a : List[str] = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : int = "When is the coffee break?" a : int = task_prompt.replace("{user_input}" , A_ ) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": a : Dict = "<s_rvlcdip>" elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: a : Dict = "<s_cord>" elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": a : List[Any] = "<s_cord-v2>" elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": a : int = "<s_zhtrainticket>" elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt a : List[str] = "hello world" else: raise ValueError("Model name not supported" ) a : Tuple = original_model.decoder.tokenizer(A_ , add_special_tokens=A_ , return_tensors="pt" )[ "input_ids" ] a : Optional[int] = original_model.encoder.model.patch_embed(A_ ) a , a : int = model.encoder.embeddings(A_ ) assert torch.allclose(A_ , A_ , atol=1e-3 ) # verify encoder hidden states a : List[str] = original_model.encoder(A_ ) a : Dict = model.encoder(A_ ).last_hidden_state assert torch.allclose(A_ , A_ , atol=1e-2 ) # verify decoder hidden states a : Union[str, Any] = original_model(A_ , A_ , A_ ).logits a : List[str] = model(A_ , decoder_input_ids=A_ ).logits assert torch.allclose(A_ , A_ , atol=1e-3 ) print("Looks ok!"
) if pytorch_dump_folder_path is not None: print(F'''Saving model and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) processor.save_pretrained(A_ ) if push_to_hub: model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" ) processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""naver-clova-ix/donut-base-finetuned-docvqa""", required=False, type=str, help="""Name of the original model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, required=False, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub.""", ) __lowercase = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
40
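Checkpoint converters like the one above are mostly an ordered list of substring renames applied to every state-dict key, plus a split of fused attention weights. A generic sketch of the rename pass; the mapping entries and fake state dict are made up for illustration:

# Generic sketch of the key-renaming pass used by checkpoint converters:
# apply ordered (old, new) substring substitutions to every state-dict key.
RENAMES = [
    ("encoder.model", "encoder"),
    ("patch_embed.proj", "embeddings.patch_embeddings.projection"),
    ("mlp.fc1", "intermediate.dense"),
]

def rename_key(name: str) -> str:
    for old, new in RENAMES:
        if old in name:
            name = name.replace(old, new)
    return name

def convert_state_dict(state_dict: dict) -> dict:
    return {rename_key(k): v for k, v in state_dict.items()}

if __name__ == "__main__":
    fake = {"encoder.model.patch_embed.proj.weight": 0, "decoder.mlp.fc1.bias": 1}
    print(convert_state_dict(fake))
    # {'encoder.embeddings.patch_embeddings.projection.weight': 0,
    #  'decoder.intermediate.dense.bias': 1}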
"""simple docstring""" from __future__ import annotations class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int = 0): a : Tuple = key def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Optional[Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : Any = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : str = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
40
1
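Because XOR is an involution, applying the same key twice recovers the plaintext, which is the property every method in the cipher class above relies on. A standalone check of that round trip:

# Demonstration of the XOR involution the cipher above relies on:
# applying chr(ord(ch) ^ key) twice with the same key is the identity.
def xor_string(content: str, key: int) -> str:
    key %= 255  # keep the key in the same range the class uses
    return "".join(chr(ord(ch) ^ key) for ch in content)

if __name__ == "__main__":
    plain = "hallo welt"
    key = 67
    cipher = xor_string(plain, key)
    assert cipher != plain
    assert xor_string(cipher, key) == plain
    print("round trip ok")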
"""simple docstring""" from math import sqrt def lowercase ( A_ = 1_000_000 )-> int: '''simple docstring''' a : int = 0 a : int = 0 a : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(A_ , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'''{solution() = }''')
40
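The counting loop above relies on the fact that for a cuboid with sides a <= b <= c, the shortest surface path between opposite corners has length sqrt((a + b)**2 + c**2). A brute-force verification of that claim against all three possible unfoldings:

from itertools import product
from math import sqrt

# Check that for a <= b <= c the minimum over the three unfoldings is
# always sqrt((a + b)**2 + c**2), the formula the solution above uses.
def shortest_path(a: int, b: int, c: int) -> float:
    return min(
        sqrt((a + b) ** 2 + c ** 2),
        sqrt((a + c) ** 2 + b ** 2),
        sqrt((b + c) ** 2 + a ** 2),
    )

for a, b, c in product(range(1, 15), repeat=3):
    if a <= b <= c:
        assert shortest_path(a, b, c) == sqrt((a + b) ** 2 + c ** 2)
print("formula verified for all cuboids with sides up to 14")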
"""simple docstring""" import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def lowercase ( A_ )-> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def lowercase ( A_ )-> Tuple: '''simple docstring''' class _A : """simple docstring""" def __init__( self : str , __UpperCAmelCase : int): a : List[Any] = metric_id class _A : """simple docstring""" UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]] def __snake_case ( self : List[str]): return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any: '''simple docstring''' if "tmp_path" in args: a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ): func(*A_ )
40
1
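The test above leans on pytest's monkeypatch fixture to swap the hub client for a stub. A minimal illustration of the same pattern on a neutral target (math.pi stands in for the patched attribute; run with pytest):

import math

def area(r: float) -> float:
    return math.pi * r * r

def test_area_with_patched_pi(monkeypatch):
    # monkeypatch.setattr swaps the attribute for this test only and
    # restores the original value automatically afterwards.
    monkeypatch.setattr(math, "pi", 3.0)
    assert area(2.0) == 12.0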
"""simple docstring""" def lowercase ( A_ )-> int: '''simple docstring''' if not isinstance(A_ , A_ ): raise ValueError("Input must be an integer" ) if input_num <= 0: raise ValueError("Input must be positive" ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
40
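A number that equals the sum of its own proper divisors is perfect, so the function above doubles as a perfect-number test. A quick standalone check on the first few perfect numbers:

# Numbers equal to the sum of their proper divisors are "perfect".
def divisor_sum(n: int) -> int:
    return sum(d for d in range(1, n // 2 + 1) if n % d == 0)

perfect = [n for n in range(2, 1000) if divisor_sum(n) == n]
print(perfect)  # [6, 28, 496]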
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example __lowercase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowercase ( A_ )-> list[list[int]]: '''simple docstring''' a : str = [] for i in range(len(A_ ) ): a : str = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours a : Union[str, Any] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(A_ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(A_ ) - 1: neighbour_count += cells[i + 1][j] if i < len(A_ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. a : Tuple = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(A_ ) return next_generation def lowercase ( A_ , A_ )-> list[Image.Image]: '''simple docstring''' a : List[str] = [] for _ in range(A_ ): # Create output image a : str = Image.new("RGB" , (len(cells[0] ), len(A_ )) ) a : Union[str, Any] = img.load() # Save cells to image for x in range(len(A_ ) ): for y in range(len(cells[0] ) ): a : Optional[Any] = 255 - cells[y][x] * 255 a : str = (colour, colour, colour) # Save image images.append(A_ ) a : Tuple = new_generation(A_ ) return images if __name__ == "__main__": __lowercase = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
40
1
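As a sanity check of the rules implemented above, the blinker should oscillate with period two. A compact standalone restatement of the update rule confirming this:

# Check that the blinker oscillates with period two under the Game of Life
# rules (same neighbour-count logic as above, written compactly).
def step(cells):
    h, w = len(cells), len(cells[0])
    def alive_next(i, j):
        n = sum(
            cells[i + di][j + dj]
            for di in (-1, 0, 1)
            for dj in (-1, 0, 1)
            if (di or dj) and 0 <= i + di < h and 0 <= j + dj < w
        )
        return 1 if (cells[i][j] and 2 <= n <= 3) or (not cells[i][j] and n == 3) else 0
    return [[alive_next(i, j) for j in range(w)] for i in range(h)]

blinker = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
once = step(blinker)
assert once == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert step(once) == blinker
print("blinker oscillates with period 2")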
"""simple docstring""" def lowercase ( A_ )-> list: '''simple docstring''' a : int = False while is_sorted is False: # Until all the indices are traversed keep looping a : Optional[Any] = True for i in range(0 , len(A_ ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: a , a : Any = input_list[i + 1], input_list[i] # swapping if elements not in order a : Optional[Any] = False for i in range(1 , len(A_ ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: a , a : Optional[int] = input_list[i + 1], input_list[i] # swapping if elements not in order a : str = False return input_list if __name__ == "__main__": print("""Enter list to be sorted""") __lowercase = [int(x) for x in input().split()] # inputing elements of the list in one line __lowercase = odd_even_sort(input_list) print("""The sorted list is""") print(sorted_list)
40
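A quick property test for the odd-even (brick) sort above: on random inputs its output should always match Python's built-in sorted(). Standalone sketch:

import random

# Compare a compact re-statement of odd-even sort against sorted().
def odd_even_sort(xs: list) -> list:
    xs = list(xs)
    done = False
    while not done:
        done = True
        for start in (0, 1):  # even-index pass, then odd-index pass
            for i in range(start, len(xs) - 1, 2):
                if xs[i] > xs[i + 1]:
                    xs[i], xs[i + 1] = xs[i + 1], xs[i]
                    done = False
    return xs

random.seed(0)
for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert odd_even_sort(data) == sorted(data)
print("odd-even sort matches sorted() on 100 random lists")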
"""simple docstring""" from itertools import permutations def lowercase ( A_ )-> bool: '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False a : Optional[int] = [7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase ( A_ = 10 )-> int: '''simple docstring''' return sum( int("".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
40
1
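The divisibility test above can be sanity-checked against the classic example from Project Euler problem 43: 1406357289 has the substring property. Standalone check:

# d2d3d4 must divide by 2, d3d4d5 by 3, ..., d8d9d10 by 17.
def is_substring_divisible(num: tuple) -> bool:
    for prime, start in zip((2, 3, 5, 7, 11, 13, 17), range(1, 8)):
        if int("".join(map(str, num[start : start + 3]))) % prime != 0:
            return False
    return True

assert is_substring_divisible(tuple(int(d) for d in "1406357289"))
assert not is_substring_divisible(tuple(range(10)))
print("1406357289 passes the substring-divisibility test")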
"""simple docstring""" import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging __lowercase = logging.get_logger(__name__) __lowercase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED __lowercase = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __lowercase = { """allenai/led-base-16384""": 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase ( )-> Dict: '''simple docstring''' a : int = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) a : int = bs[:] a : List[str] = 0 for b in range(2**8 ): if b not in bs: bs.append(A_ ) cs.append(2**8 + n ) n += 1 a : List[str] = [chr(A_ ) for n in cs] return dict(zip(A_ , A_ ) ) def lowercase ( A_ )-> Optional[Any]: '''simple docstring''' a : Any = set() a : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) a : int = char return pairs class _A ( _a ): """simple docstring""" UpperCAmelCase : Dict = VOCAB_FILES_NAMES UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : List[Any] = ["""input_ids""", """attention_mask"""] def __init__( self : Tuple , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any]="replace" , __UpperCAmelCase : Any="<s>" , __UpperCAmelCase : Any="</s>" , __UpperCAmelCase : Tuple="</s>" , __UpperCAmelCase : Dict="<s>" , __UpperCAmelCase : List[str]="<unk>" , __UpperCAmelCase : str="<pad>" , __UpperCAmelCase : Union[str, Any]="<mask>" , __UpperCAmelCase : Any=False , **__UpperCAmelCase : Any , ): a : Optional[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else bos_token a : Optional[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else eos_token a : Dict = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else sep_token a : Dict = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else cls_token a : Union[str, Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else unk_token a : int = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it a : Tuple = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else mask_token super().__init__( errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , ) with open(__UpperCAmelCase , encoding="utf-8") as vocab_handle: a : int = json.load(__UpperCAmelCase) a : Optional[int] = {v: k for k, v in self.encoder.items()} a : Optional[int] = errors # how to handle errors in decoding a : Any = bytes_to_unicode() a : Optional[int] = {v: k for k, v in self.byte_encoder.items()} with open(__UpperCAmelCase , encoding="utf-8") as merges_handle: a : Any = merges_handle.read().split("\n")[1:-1] a : Optional[Any] = [tuple(merge.split()) for merge in bpe_merges] a : List[Any] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase)))) a : Tuple = {} a : List[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions a : List[Any] = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+") @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def __snake_case ( self : Optional[int]): return len(self.encoder) def __snake_case ( self : Any): return dict(self.encoder , **self.added_tokens_encoder) def __snake_case ( self : List[str] , __UpperCAmelCase : int): if token in self.cache: return self.cache[token] a : List[Any] = tuple(__UpperCAmelCase) a : Any = get_pairs(__UpperCAmelCase) if not pairs: return token while True: a : Union[str, Any] = min(__UpperCAmelCase , key=lambda __UpperCAmelCase: self.bpe_ranks.get(__UpperCAmelCase , float("inf"))) if bigram not in self.bpe_ranks: break a , a : str = bigram a : Tuple = [] a : Optional[int] = 0 while i < len(__UpperCAmelCase): try: a : str = word.index(__UpperCAmelCase , __UpperCAmelCase) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) a : Tuple = j if word[i] == first and i < len(__UpperCAmelCase) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 a : str = tuple(__UpperCAmelCase) a : Dict = new_word if len(__UpperCAmelCase) == 1: break else: a : str = get_pairs(__UpperCAmelCase) a : Any = " ".join(__UpperCAmelCase) a : Dict = word return word def __snake_case ( self : Optional[int] , __UpperCAmelCase : str): a : int = [] for token in re.findall(self.pat , __UpperCAmelCase): a : List[str] = "".join( self.byte_encoder[b] for b in token.encode("utf-8")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCAmelCase).split(" ")) return bpe_tokens def __snake_case ( self : Dict , __UpperCAmelCase : str): return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token)) def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any]): return self.decoder.get(__UpperCAmelCase) def __snake_case ( self : List[Any] , __UpperCAmelCase : List[str]): a : Union[str, Any] = "".join(__UpperCAmelCase) a : List[Any] = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8" , errors=self.errors) return text def __snake_case ( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : 
Optional[str] = None): if not os.path.isdir(__UpperCAmelCase): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return a : List[Any] = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) a : int = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]) with open(__UpperCAmelCase , "w" , encoding="utf-8") as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase) + "\n") a : Tuple = 0 with open(__UpperCAmelCase , "w" , encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCAmelCase: kv[1]): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!") a : List[Any] = token_index writer.write(" ".join(__UpperCAmelCase) + "\n") index += 1 return vocab_file, merge_file def __snake_case ( self : Any , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a : Optional[int] = [self.cls_token_id] a : Tuple = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __snake_case ( self : Any , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase)) + [1] return [1] + ([0] * len(__UpperCAmelCase)) + [1, 1] + ([0] * len(__UpperCAmelCase)) + [1] def __snake_case ( self : List[str] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None): a : Optional[Any] = [self.sep_token_id] a : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def __snake_case ( self : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any]=False , **__UpperCAmelCase : Union[str, Any]): a : int = kwargs.pop("add_prefix_space" , self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(__UpperCAmelCase) > 0 and not text[0].isspace()): a : List[Any] = " " + text return (text, kwargs) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , ): a : Union[str, Any] = super()._pad( encoded_inputs=__UpperCAmelCase , max_length=__UpperCAmelCase , padding_strategy=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , ) # Load from model defaults if return_attention_mask is None: a : List[Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a : Tuple = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
a : Tuple = len(encoded_inputs["global_attention_mask"]) != len(__UpperCAmelCase) if needs_to_be_padded: a : Union[str, Any] = len(__UpperCAmelCase) - len(encoded_inputs["global_attention_mask"]) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a : str = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": a : Optional[Any] = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs
40
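The heart of the tokenizer above is the BPE loop: repeatedly fuse the adjacent symbol pair with the lowest merge rank until no known merge remains. A minimal sketch with a made-up three-entry merge table:

# Minimal BPE merge loop; the tiny rank table is illustrative only.
def bpe(word: str, ranks: dict) -> list:
    symbols = list(word)
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break  # no known merge left
        first, second = best
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(first + second)  # fuse the best-ranked pair
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

ranks = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r"): 2}
print(bpe("lower", ranks))  # ['low', 'er']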
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Dict = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase : Optional[int] = False @property def __snake_case ( self : Optional[Any]): return 32 @property def __snake_case ( self : Dict): return 32 @property def __snake_case ( self : Dict): return self.time_input_dim @property def __snake_case ( self : Any): return self.time_input_dim * 4 @property def __snake_case ( self : str): return 100 @property def __snake_case ( self : str): torch.manual_seed(0) a : str = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } a : Dict = UNetaDConditionModel(**__UpperCAmelCase) return model @property def __snake_case ( self : str): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __snake_case ( self : Union[str, Any]): torch.manual_seed(0) a : Dict = VQModel(**self.dummy_movq_kwargs) return model def __snake_case ( self : Optional[Any]): a : Optional[Any] = self.dummy_unet a : int = self.dummy_movq a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , ) a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0): a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : 
List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( __UpperCAmelCase) # create hint a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) if str(__UpperCAmelCase).startswith("mps"): a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase) else: a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : str = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __snake_case ( self : Dict): a : str = "cpu" a : Tuple = self.get_dummy_components() a : Dict = self.pipeline_class(**__UpperCAmelCase) a : Optional[int] = pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase)) a : Any = output.images a : Any = pipe( **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0] a : Union[str, Any] = image[0, -3:, -3:, -1] a : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : Tuple = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[str]): a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0 a : str = hint.permute(2 , 0 , 1).unsqueeze(0) a : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(__UpperCAmelCase) a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa) a : int = pipeline.to(__UpperCAmelCase) pipeline.set_progress_bar_config(disable=__UpperCAmelCase) a : Tuple = "A robot, 4k photo" a : Any = torch.Generator(device="cuda").manual_seed(0) a , a : int = pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a : str = torch.Generator(device="cuda").manual_seed(0) a : Union[str, Any] = pipeline( image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) a : str = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
40
1
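The pipeline test above can only assert fixed expected slices because generation is driven by a seeded generator. A dependency-free illustration of that seeded-reproducibility pattern:

import random

# With a fixed seed, two runs produce identical "random" outputs, which is
# what makes expected-slice assertions in the test above possible.
def fake_sample(seed: int, n: int = 5) -> list:
    rng = random.Random(seed)
    return [rng.random() for _ in range(n)]

assert fake_sample(0) == fake_sample(0)
assert fake_sample(0) != fake_sample(1)
print("same seed -> same samples")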
from __future__ import annotations UpperCAmelCase__ = list[list[int]] # assigning initial values to the grid UpperCAmelCase__ = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution UpperCAmelCase__ = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def _a ( a :Matrix , a :int , a :int , a :int ) -> bool: for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def _a ( a :Matrix ) -> tuple[int, int] | None: for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def _a ( a :Matrix ) -> Matrix | None: if location := find_empty_location(a ): a , a = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(a , a , a , a ): a = digit if sudoku(a ) is not None: return grid a = 0 return None def _a ( a :Matrix ) -> None: for row in grid: for cell in row: print(a , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("\nExample grid:\n" + "=" * 20) print_solution(example_grid) print("\nExample grid solution:") UpperCAmelCase__ = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("Cannot find a solution.")
0
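The backtracking solver above prunes with a single safety predicate: a digit may enter a cell only if its row, column, and 3x3 box are free. A standalone demo of that predicate, including the box-corner arithmetic row - row % 3:

# Safety predicate from the sudoku solver, exercised on a mostly empty grid.
def is_safe(grid, row, col, n):
    if any(grid[row][i] == n or grid[i][col] == n for i in range(9)):
        return False
    r0, c0 = row - row % 3, col - col % 3  # top-left corner of the 3x3 box
    return all(grid[r0 + i][c0 + j] != n for i in range(3) for j in range(3))

grid = [[0] * 9 for _ in range(9)]
grid[0][0] = 5
assert not is_safe(grid, 0, 8, 5)  # same row
assert not is_safe(grid, 8, 0, 5)  # same column
assert not is_safe(grid, 2, 2, 5)  # same 3x3 box
assert is_safe(grid, 4, 4, 5)      # unconstrained cell
print("safety check behaves as expected")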
"""simple docstring""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) def lowercase ( A_ )-> Dict: '''simple docstring''' a : str = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: a : Union[str, Any] = 128 elif "12-12" in model_name: a : List[Any] = 12 a : str = 12 elif "14-14" in model_name: a : List[Any] = 14 a : Optional[int] = 14 elif "16-16" in model_name: a : Any = 16 a : List[Any] = 16 else: raise ValueError("Model not supported" ) a : Optional[int] = "huggingface/label-files" if "speech-commands" in model_name: a : Optional[int] = 35 a : List[str] = "speech-commands-v2-id2label.json" else: a : Optional[Any] = 527 a : Tuple = "audioset-id2label.json" a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) ) a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()} a : Any = idalabel a : str = {v: k for k, v in idalabel.items()} return config def lowercase ( A_ )-> Tuple: '''simple docstring''' if "module.v" in name: a : Union[str, Any] = name.replace("module.v" , "audio_spectrogram_transformer" ) if "cls_token" in name: a : List[Any] = name.replace("cls_token" , "embeddings.cls_token" ) if "dist_token" in name: a : Union[str, Any] = name.replace("dist_token" , "embeddings.distillation_token" ) if "pos_embed" in name: a : str = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: a : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) # transformer blocks if "blocks" in name: a : Union[str, Any] = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: a : str = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: a : Tuple = name.replace("attn" , "attention.self" ) if "norm1" in name: a : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a : Union[str, Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: a : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" ) # classifier head if "module.mlp_head.0" in name: a : List[str] = name.replace("module.mlp_head.0" , "classifier.layernorm" ) if "module.mlp_head.1" in name: a : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" ) return name def lowercase ( A_ , A_ )-> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): a : str = orig_state_dict.pop(A_ ) if "qkv" in key: a : int = key.split("." 
) a : Optional[int] = int(key_split[3] ) a : int = config.hidden_size if "weight" in key: a : List[str] = val[:dim, :] a : Any = val[dim : dim * 2, :] a : int = val[-dim:, :] else: a : Optional[Any] = val[:dim] a : Union[str, Any] = val[dim : dim * 2] a : str = val[-dim:] else: a : str = val return orig_state_dict def lowercase ( A_ )-> Dict: '''simple docstring''' a : Union[str, Any] = [ "module.v.head.weight", "module.v.head.bias", "module.v.head_dist.weight", "module.v.head_dist.bias", ] for k in ignore_keys: state_dict.pop(A_ , A_ ) @torch.no_grad() def lowercase ( A_ , A_ , A_=False )-> Optional[int]: '''simple docstring''' a : Optional[int] = get_audio_spectrogram_transformer_config(A_ ) a : Dict = { "ast-finetuned-audioset-10-10-0.4593": ( "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.450": ( "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448": ( "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448-v2": ( "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" ), "ast-finetuned-audioset-12-12-0.447": ( "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" ), "ast-finetuned-audioset-14-14-0.443": ( "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" ), "ast-finetuned-audioset-16-16-0.442": ( "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" ), "ast-finetuned-speech-commands-v2": ( "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" ), } # load original state_dict a : Any = model_name_to_url[model_name] a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" ) # remove some keys remove_keys(A_ ) # rename some keys a : Union[str, Any] = convert_state_dict(A_ , A_ ) # load 🤗 model a : List[str] = ASTForAudioClassification(A_ ) model.eval() model.load_state_dict(A_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8 a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6 a : str = 1_024 if "speech-commands" not in model_name else 128 a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ ) if "speech-commands" in model_name: a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" ) a : int = dataset[0]["audio"]["array"] else: a : Tuple = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , ) a , a : Tuple = torchaudio.load(A_ ) a : Optional[Any] = waveform.squeeze().numpy() a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" ) # forward pass a : Optional[Any] = model(**A_ ) a : List[str] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] ) elif model_name == 
"ast-finetuned-audioset-12-12-0.447": a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] ) elif model_name == "ast-finetuned-speech-commands-v2": a : Union[str, Any] = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] ) else: raise ValueError("Unknown model name" ) if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ): raise ValueError("Logits don't match" ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(A_ ).mkdir(exist_ok=A_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(A_ ) if push_to_hub: print("Pushing model and feature extractor to the hub..." ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __lowercase = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
40
0
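Both conversion scripts in this section split a fused qkv in-projection of shape (3*dim, dim) into query/key/value thirds. A small torch sketch of exactly that slicing, on a toy tensor:

import torch

# Cut a fused (3*dim, dim) in-projection weight into q/k/v thirds, as the
# "qkv" branch of convert_state_dict above does.
dim = 4
fused_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)

query = fused_weight[:dim, :]
key = fused_weight[dim : dim * 2, :]
value = fused_weight[-dim:, :]

assert torch.equal(torch.cat([query, key, value]), fused_weight)
print(query.shape, key.shape, value.shape)  # three torch.Size([4, 4])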
'''simple docstring''' import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 SCREAMING_SNAKE_CASE_: Dict =get_tests_dir('fixtures/dummy-config.json') class __A ( unittest.TestCase ): def _lowercase (self : List[str] ): UpperCAmelCase_ = 0 def _lowercase (self : Union[str, Any] ): self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) ) def _lowercase (self : int ): UpperCAmelCase_ = AutoConfig.from_pretrained("bert-base-uncased" ) self.assertIsInstance(__a , __a ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ = AutoConfig.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = AutoConfig.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : Tuple ): UpperCAmelCase_ = AutoConfig.for_model("roberta" ) self.assertIsInstance(__a , __a ) def _lowercase (self : List[str] ): with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. UpperCAmelCase_ = os.path.join(__a , "fake-roberta" ) os.makedirs(__a , exist_ok=__a ) with open(os.path.join(__a , "config.json" ) , "w" ) as f: f.write(json.dumps({} ) ) UpperCAmelCase_ = AutoConfig.from_pretrained(__a ) self.assertEqual(type(__a ) , __a ) def _lowercase (self : str ): try: AutoConfig.register("custom" , __a ) # Wrong model type will raise an error with self.assertRaises(__a ): AutoConfig.register("model" , __a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a ): AutoConfig.register("bert" , __a ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCAmelCase_ = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__a ) UpperCAmelCase_ = AutoConfig.from_pretrained(__a ) self.assertIsInstance(__a , __a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def _lowercase (self : Union[str, Any] ): with self.assertRaisesRegex( __a , "bert-base is not a local folder and is not a valid model identifier" ): UpperCAmelCase_ = AutoConfig.from_pretrained("bert-base" ) def _lowercase (self : Optional[Any] ): with self.assertRaisesRegex( __a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): UpperCAmelCase_ = AutoConfig.from_pretrained(__a , revision="aaaaaa" ) def _lowercase (self : Tuple ): with self.assertRaisesRegex( __a , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ): UpperCAmelCase_ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" ) def _lowercase (self : Any ): # If remote code is not set, we will time out when asking whether to load the model. 
with self.assertRaises(__a ): UpperCAmelCase_ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__a ): UpperCAmelCase_ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__a ) UpperCAmelCase_ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__a ) self.assertEqual(config.__class__.__name__ , "NewModelConfig" ) # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__a ) UpperCAmelCase_ = AutoConfig.from_pretrained(__a , trust_remote_code=__a ) self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" ) def _lowercase (self : Tuple ): class __A ( UpperCamelCase__ ): a__ : str = """new-model""" try: AutoConfig.register("new-model" , __a ) # If remote code is not set, the default is to use local UpperCAmelCase_ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ) self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" ) # If remote code is disabled, we load the local one. UpperCAmelCase_ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__a ) self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" ) # If remote is enabled, we load from the Hub UpperCAmelCase_ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__a ) self.assertEqual(config.__class__.__name__ , "NewModelConfig" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
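A minimal sketch of the register-then-resolve round trip the last test exercises, assuming `transformers` is installed; `ToyConfig` and its model type are hypothetical names, not part of the library.

from transformers import AutoConfig, PretrainedConfig


class ToyConfig(PretrainedConfig):
    model_type = "toy-model"  # hypothetical model type


AutoConfig.register("toy-model", ToyConfig)
config = AutoConfig.for_model("toy-model")
assert type(config).__name__ == "ToyConfig"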
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase = { """configuration_rag""": ["""RagConfig"""], """retrieval_rag""": ["""RagRetriever"""], """tokenization_rag""": ["""RagTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """RagModel""", """RagPreTrainedModel""", """RagSequenceForGeneration""", """RagTokenForGeneration""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TFRagModel""", """TFRagPreTrainedModel""", """TFRagSequenceForGeneration""", """TFRagTokenForGeneration""", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
0
'''simple docstring''' from __future__ import annotations import collections import pprint from pathlib import Path def _SCREAMING_SNAKE_CASE (A ) -> str: """simple docstring""" return "".join(sorted(A ) ) def _SCREAMING_SNAKE_CASE (A ) -> list[str]: """simple docstring""" return word_by_signature[signature(A )] lowerCamelCase : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8') lowerCamelCase : List[Any] = sorted({word.strip().lower() for word in data.splitlines()}) lowerCamelCase : List[str] = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": lowerCamelCase : Any = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('anagrams.txt', 'w') as file: file.write('all_anagrams = \n ') file.write(pprint.pformat(all_anagrams))
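The same signature-bucketing idea in a fully self-contained form, with a small in-memory word list standing in for words.txt:

import collections

def signature(word: str) -> str:
    return "".join(sorted(word))

words = ["dog", "god", "cat", "act", "bat", "tab"]
word_by_signature = collections.defaultdict(list)
for word in words:
    word_by_signature[signature(word)].append(word)

all_anagrams = {word: word_by_signature[signature(word)] for word in words}
print(all_anagrams["dog"])  # ['dog', 'god']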
2
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _a ,_a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = StableDiffusionInpaintPipeline UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase : Union[str, Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase : int = frozenset([] ) def __snake_case ( self : Dict): torch.manual_seed(0) a : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , ) a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase) torch.manual_seed(0) a : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) a : Any = CLIPTextModel(__UpperCAmelCase) a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0] a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64)) a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64)) if str(__UpperCAmelCase).startswith("mps"): a : Tuple = torch.manual_seed(__UpperCAmelCase) else: a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __snake_case ( self : List[str]): a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent 
torch.Generator a : Tuple = self.get_dummy_components() a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase) a : int = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Any = self.get_dummy_inputs(__UpperCAmelCase) a : Optional[int] = sd_pipe(**__UpperCAmelCase).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : int = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __snake_case ( self : str): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Dict): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy") a : Tuple = "stabilityai/stable-diffusion-2-inpainting" a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Any = "Face of a yellow cat, high resolution, sitting on a park bench" a : str = torch.manual_seed(0) a : Union[str, Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def __snake_case ( self : Any): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Any = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Dict = torch.manual_seed(0) a : List[Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : Optional[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def __snake_case ( self : int): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[int] = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler") a : int = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Optional[int] = torch.manual_seed(0) a : str = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="np" , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
40
0
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer lowercase : Tuple = logging.get_logger(__name__) lowercase : Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} lowercase : Union[str, Any] = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } lowercase : List[str] = { 'squeezebert/squeezebert-uncased': 5_12, 'squeezebert/squeezebert-mnli': 5_12, 'squeezebert/squeezebert-mnli-headless': 5_12, } lowercase : List[str] = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_INIT_CONFIGURATION __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = SqueezeBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="[UNK]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="[PAD]" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , tokenize_chinese_chars=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE ) != do_lower_case or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars ): A : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , normalizer_state.pop('''type''' ) ) A : Dict = do_lower_case A : Dict = strip_accents A : Any = tokenize_chinese_chars A : Optional[int] = normalizer_class(**SCREAMING_SNAKE_CASE ) A : str = do_lower_case def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> Optional[Any]: """simple docstring""" A : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None 
) -> List[int]: """simple docstring""" A : Union[str, Any] = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" A : str = self._tokenizer.model.save(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE ) return tuple(SCREAMING_SNAKE_CASE )
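Usage sketch for the fast tokenizer defined above (assumes `transformers` and `tokenizers` are installed; downloads one of the checkpoints named in the maps):

from transformers import SqueezeBertTokenizerFast

tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
encoded = tokenizer("Hello world!", "A second segment.")
print(encoded["input_ids"])
print(encoded["token_type_ids"])  # 0s for the first segment, 1s for the second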
3
"""simple docstring""" def lowercase ( A_ )-> bool: '''simple docstring''' if not all(x.isalpha() for x in string ): raise ValueError("String must only contain alphabetic characters." ) a : Tuple = sorted(string.lower() ) return len(A_ ) == len(set(A_ ) ) if __name__ == "__main__": __lowercase = input("""Enter a string """).strip() __lowercase = is_isogram(input_str) print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
40
0
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig __snake_case =logging.get_logger(__name__) # General docstring __snake_case ="""RegNetConfig""" # Base docstring __snake_case ="""facebook/regnet-y-040""" __snake_case =[1, 1_088, 7, 7] # Image classification docstring __snake_case ="""facebook/regnet-y-040""" __snake_case ="""tabby, tabby cat""" __snake_case =[ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCAmelCase_ ( nn.Module ): def __init__( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[str] = "relu" , ) -> Any: super().__init__() lowerCAmelCase = nn.Convad( UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=UpperCAmelCase__ , stride=UpperCAmelCase__ , padding=kernel_size // 2 , groups=UpperCAmelCase__ , bias=UpperCAmelCase__ , ) lowerCAmelCase = nn.BatchNormad(UpperCAmelCase__ ) lowerCAmelCase = ACTaFN[activation] if activation is not None else nn.Identity() def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : Tuple ) -> Tuple: lowerCAmelCase = self.convolution(UpperCAmelCase__ ) lowerCAmelCase = self.normalization(UpperCAmelCase__ ) lowerCAmelCase = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCAmelCase_ ( nn.Module ): def __init__( self : Optional[int] , UpperCAmelCase__ : RegNetConfig ) -> Tuple: super().__init__() lowerCAmelCase = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) lowerCAmelCase = config.num_channels def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] ) -> Any: lowerCAmelCase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
) lowerCAmelCase = self.embedder(UpperCAmelCase__ ) return hidden_state class UpperCAmelCase_ ( nn.Module ): def __init__( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 ) -> Optional[int]: super().__init__() lowerCAmelCase = nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , stride=UpperCAmelCase__ , bias=UpperCAmelCase__ ) lowerCAmelCase = nn.BatchNormad(UpperCAmelCase__ ) def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Tensor ) -> Tensor: lowerCAmelCase = self.convolution(UpperCAmelCase__ ) lowerCAmelCase = self.normalization(UpperCAmelCase__ ) return hidden_state class UpperCAmelCase_ ( nn.Module ): def __init__( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Union[str, Any]: super().__init__() lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) ) lowerCAmelCase = nn.Sequential( nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 ) , nn.Sigmoid() , ) def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : str ) -> Optional[Any]: # b c h w -> b c 1 1 lowerCAmelCase = self.pooler(UpperCAmelCase__ ) lowerCAmelCase = self.attention(UpperCAmelCase__ ) lowerCAmelCase = hidden_state * attention return hidden_state class UpperCAmelCase_ ( nn.Module ): def __init__( self : Optional[int] , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 ) -> Optional[int]: super().__init__() lowerCAmelCase = in_channels != out_channels or stride != 1 lowerCAmelCase = max(1 , out_channels // config.groups_width ) lowerCAmelCase = ( RegNetShortCut(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) lowerCAmelCase = nn.Sequential( RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act ) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ ) , ) lowerCAmelCase = ACTaFN[config.hidden_act] def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : Any ) -> Union[str, Any]: lowerCAmelCase = hidden_state lowerCAmelCase = self.layer(UpperCAmelCase__ ) lowerCAmelCase = self.shortcut(UpperCAmelCase__ ) hidden_state += residual lowerCAmelCase = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCAmelCase_ ( nn.Module ): def __init__( self : str , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 ) -> Optional[Any]: super().__init__() lowerCAmelCase = in_channels != out_channels or stride != 1 lowerCAmelCase = max(1 , out_channels // config.groups_width ) lowerCAmelCase = ( RegNetShortCut(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) lowerCAmelCase = nn.Sequential( RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act ) , RegNetSELayer(UpperCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ ) , ) lowerCAmelCase = ACTaFN[config.hidden_act] def __UpperCAmelCase ( self : Any , 
UpperCAmelCase__ : Union[str, Any] ) -> Tuple: lowerCAmelCase = hidden_state lowerCAmelCase = self.layer(UpperCAmelCase__ ) lowerCAmelCase = self.shortcut(UpperCAmelCase__ ) hidden_state += residual lowerCAmelCase = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCAmelCase_ ( nn.Module ): def __init__( self : Union[str, Any] , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) -> Optional[Any]: super().__init__() lowerCAmelCase = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer lowerCAmelCase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , ) , *[layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) for _ in range(depth - 1 )] , ) def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : List[str] ) -> Tuple: lowerCAmelCase = self.layers(UpperCAmelCase__ ) return hidden_state class UpperCAmelCase_ ( nn.Module ): def __init__( self : Any , UpperCAmelCase__ : RegNetConfig ) -> Dict: super().__init__() lowerCAmelCase = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( UpperCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) lowerCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(UpperCAmelCase__ , config.depths[1:] ): self.stages.append(RegNetStage(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , depth=UpperCAmelCase__ ) ) def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Tensor , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True ) -> BaseModelOutputWithNoAttention: lowerCAmelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowerCAmelCase = hidden_states + (hidden_state,) lowerCAmelCase = stage_module(UpperCAmelCase__ ) if output_hidden_states: lowerCAmelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ ) class UpperCAmelCase_ ( __lowercase ): lowerCamelCase : List[Any] = RegNetConfig lowerCamelCase : Any = '''regnet''' lowerCamelCase : Any = '''pixel_values''' lowerCamelCase : Union[str, Any] = True def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : int ) -> Optional[int]: if isinstance(UpperCAmelCase__ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(UpperCAmelCase__ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple=False ) -> Any: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowerCAmelCase = value __snake_case =R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ __snake_case =R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''' , __lowercase , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class UpperCAmelCase_ ( __lowercase ): def __init__( self : List[str] , UpperCAmelCase__ : Optional[int] ) -> List[Any]: super().__init__(UpperCAmelCase__ ) lowerCAmelCase = config lowerCAmelCase = RegNetEmbeddings(UpperCAmelCase__ ) lowerCAmelCase = RegNetEncoder(UpperCAmelCase__ ) lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: lowerCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase = self.embedder(UpperCAmelCase__ ) lowerCAmelCase = self.encoder( UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ ) lowerCAmelCase = encoder_outputs[0] lowerCAmelCase = self.pooler(UpperCAmelCase__ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCAmelCase__ , pooler_output=UpperCAmelCase__ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , __lowercase , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class UpperCAmelCase_ ( __lowercase ): def __init__( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> str: super().__init__(UpperCAmelCase__ ) lowerCAmelCase = config.num_labels lowerCAmelCase = RegNetModel(UpperCAmelCase__ ) # classification head lowerCAmelCase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[torch.LongTensor] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase = self.regnet(UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ ) lowerCAmelCase = outputs.pooler_output if return_dict else outputs[1] lowerCAmelCase = self.classifier(UpperCAmelCase__ ) lowerCAmelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowerCAmelCase = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowerCAmelCase = 'single_label_classification' else: lowerCAmelCase = 'multi_label_classification' if self.config.problem_type == "regression": lowerCAmelCase = MSELoss() if self.num_labels == 1: lowerCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowerCAmelCase = loss_fct(UpperCAmelCase__ , UpperCAmelCase__ ) elif self.config.problem_type == "single_label_classification": lowerCAmelCase = CrossEntropyLoss() lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowerCAmelCase = BCEWithLogitsLoss() lowerCAmelCase = loss_fct(UpperCAmelCase__ , UpperCAmelCase__ ) if not return_dict: lowerCAmelCase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=UpperCAmelCase__ , logits=UpperCAmelCase__ , hidden_states=outputs.hidden_states )
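A hedged end-to-end sketch of the classification head above (assumes `transformers` and `torch`; the random tensor stands in for a properly preprocessed image, so the predicted label is meaningless but the shapes are right):

import torch
from transformers import RegNetForImageClassification

model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
model.eval()

pixel_values = torch.randn(1, 3, 224, 224)  # stand-in for AutoImageProcessor output
with torch.no_grad():
    logits = model(pixel_values=pixel_values).logits
print(model.config.id2label[int(logits.argmax(-1))])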
4
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __lowercase = datasets.utils.logging.get_logger(__name__) @dataclass class _A ( datasets.BuilderConfig ): """simple docstring""" UpperCAmelCase : int = 1_0_0_0_0 UpperCAmelCase : Optional[List[str]] = None UpperCAmelCase : Optional[datasets.Features] = None class _A ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCAmelCase : str = ParquetConfig def __snake_case ( self : Tuple): return datasets.DatasetInfo(features=self.config.features) def __snake_case ( self : List[Any] , __UpperCAmelCase : str): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''') a : str = dl_manager.download_and_extract(self.config.data_files) if isinstance(__UpperCAmelCase , (str, list, tuple)): a : Dict = data_files if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : str = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})] a : Dict = [] for split_name, files in data_files.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__UpperCAmelCase): with open(__UpperCAmelCase , "rb") as f: a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase)) break splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files})) return splits def __snake_case ( self : List[str] , __UpperCAmelCase : pa.Table): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema) return pa_table def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int): a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema) != sorted(self.config.columns): raise ValueError( f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''') for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)): with open(__UpperCAmelCase , "rb") as f: a : Tuple = pq.ParquetFile(__UpperCAmelCase) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)): a : Optional[Any] = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase) except ValueError as e: logger.error(f'''Failed to read file \'{file}\' with error 
{type(__UpperCAmelCase)}: {e}''') raise
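From the user side, the builder above is what runs under this call pattern (assumes `datasets` and `pyarrow` are installed):

import pyarrow as pa
import pyarrow.parquet as pq
from datasets import load_dataset

pq.write_table(pa.table({"text": ["a", "b"], "label": [0, 1]}), "toy.parquet")
ds = load_dataset("parquet", data_files="toy.parquet", split="train")
print(ds[0])  # {'text': 'a', 'label': 0}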
40
0
import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor UpperCAmelCase__ = logging.get_logger(__name__) class lowerCamelCase__ ( lowerCAmelCase): def __init__(self , *UpperCAmelCase , **UpperCAmelCase ) -> None: warnings.warn( '''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DonutImageProcessor instead.''' , UpperCAmelCase , ) super().__init__(*UpperCAmelCase , **UpperCAmelCase )
5
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json""" ), } class _A ( _a ): """simple docstring""" UpperCAmelCase : int = """dpr""" def __init__( self : List[Any] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : int = 0 , **__UpperCAmelCase : Tuple , ): super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase) a : List[Any] = vocab_size a : Optional[Any] = hidden_size a : Union[str, Any] = num_hidden_layers a : Dict = num_attention_heads a : int = hidden_act a : Any = intermediate_size a : Any = hidden_dropout_prob a : Dict = attention_probs_dropout_prob a : Any = max_position_embeddings a : Union[str, Any] = type_vocab_size a : Optional[Any] = initializer_range a : Dict = layer_norm_eps a : int = projection_dim a : str = position_embedding_type
40
0
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa

if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
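A self-contained check of the encode path above, matching the behaviour documented in `datasets`:

from datasets.features import TranslationVariableLanguages

feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}))
# {'language': ['de', 'en', 'fr', 'fr'],
#  'translation': ['die katze', 'the cat', 'la chatte', 'le chat']}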
6
"""simple docstring""" class _A : """simple docstring""" def __init__( self : int , __UpperCAmelCase : int): a : Tuple = size a : Dict = [0] * size a : Optional[int] = [0] * size @staticmethod def __snake_case ( __UpperCAmelCase : int): return index | (index + 1) @staticmethod def __snake_case ( __UpperCAmelCase : int): return (index & (index + 1)) - 1 def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int): a : Union[str, Any] = value while index < self.size: a : Dict = self.get_prev(__UpperCAmelCase) + 1 if current_left_border == index: a : Optional[int] = value else: a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = self.get_next(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): right -= 1 # Because of right is exclusive a : List[str] = 0 while left <= right: a : Dict = self.get_prev(__UpperCAmelCase) if left <= current_left: a : Optional[int] = max(__UpperCAmelCase , self.tree[right]) a : Optional[Any] = current_left else: a : List[str] = max(__UpperCAmelCase , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
40
0
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
7
"""simple docstring""" import unittest from knapsack import knapsack as k class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[Any]): a : str = 0 a : Optional[int] = [0] a : Union[str, Any] = [0] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) a : List[str] = [60] a : str = [10] a : Optional[int] = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) def __snake_case ( self : Optional[int]): a : Any = 3 a : str = [1, 2, 3] a : Tuple = [3, 2, 1] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5) def __snake_case ( self : Tuple): a : int = 50 a : List[Any] = [60, 100, 120] a : Optional[int] = [10, 20, 30] a : str = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220) if __name__ == "__main__": unittest.main()
40
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = '''▁''' lowerCAmelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model'''} lowerCAmelCase_ = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } lowerCAmelCase_ = { '''facebook/xglm-564M''': 20_48, } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : int="<s>" , _UpperCamelCase : int="</s>" , _UpperCamelCase : Optional[int]="</s>" , _UpperCamelCase : Tuple="<s>" , _UpperCamelCase : Dict="<unk>" , _UpperCamelCase : List[str]="<pad>" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Optional[Any] , ) ->None: snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer snake_case_ = 7 snake_case_ = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )] snake_case_ = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCamelCase ) ) snake_case_ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab snake_case_ = 1 # Mimic fairseq token-to-id alignment for the first 4 token snake_case_ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} snake_case_ = len(self.sp_model ) snake_case_ = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(_UpperCamelCase ) snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Union[str, Any] ) ->str: snake_case_ = self.__dict__.copy() snake_case_ = None snake_case_ = self.sp_model.serialized_model_proto() return state def __setstate__( self : Any , _UpperCamelCase : Union[str, Any] ) ->Optional[Any]: snake_case_ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def snake_case__( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: if token_ids_a is None: return [self.sep_token_id] + token_ids_a snake_case_ = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def snake_case__( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase )) return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase )) def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def snake_case__( self : int ) ->Union[str, Any]: return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def snake_case__( self : Any ) ->int: snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case__( self : int , _UpperCamelCase : str ) ->List[str]: return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) def snake_case__( self : Optional[Any] , _UpperCamelCase : Optional[Any] ) ->Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case_ = self.sp_model.PieceToId(_UpperCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def snake_case__( self : Dict , _UpperCamelCase : List[str] ) ->int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def snake_case__( self : Tuple , _UpperCamelCase : int ) ->str: snake_case_ = ''''''.join(_UpperCamelCase ).replace(_UpperCamelCase , ''' ''' ).strip() return out_string def snake_case__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ = os.path.join( 
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , '''wb''' ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,)
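Usage sketch (assumes `transformers` and `sentencepiece` are installed; downloads the 564M checkpoint's vocabulary):

from transformers import XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
ids = tokenizer("Hello world").input_ids
print(tokenizer.convert_ids_to_tokens(ids))  # note the leading </s> added by build_inputs_with_special_tokens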
8
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = LayoutLMTokenizer UpperCAmelCase : int = LayoutLMTokenizerFast UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : Optional[Any] = True def __snake_case ( self : Optional[int]): super().setUp() a : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str): a : Tuple = "UNwant\u00E9d,running" a : Dict = "unwanted, running" return input_text, output_text def __snake_case ( self : Any): a : List[Any] = self.tokenizer_class(self.vocab_file) a : str = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9]) def __snake_case ( self : Dict): pass
40
0
import argparse import math import traceback import dateutil.parser as date_parser import requests def _UpperCamelCase ( lowercase__ ): __SCREAMING_SNAKE_CASE : List[str] = {} __SCREAMING_SNAKE_CASE : Optional[Any] = job['''started_at'''] __SCREAMING_SNAKE_CASE : List[str] = job['''completed_at'''] __SCREAMING_SNAKE_CASE : List[str] = date_parser.parse(lowercase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = date_parser.parse(lowercase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = round((end_datetime - start_datetime).total_seconds() / 60.0 ) __SCREAMING_SNAKE_CASE : Any = start __SCREAMING_SNAKE_CASE : Optional[int] = end __SCREAMING_SNAKE_CASE : Dict = duration_in_min return job_info def _UpperCamelCase ( lowercase__ , lowercase__=None ): __SCREAMING_SNAKE_CASE : Optional[Any] = None if token is not None: __SCREAMING_SNAKE_CASE : Optional[int] = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'''Bearer {token}'''} __SCREAMING_SNAKE_CASE : int = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' __SCREAMING_SNAKE_CASE : int = requests.get(lowercase__ , headers=lowercase__ ).json() __SCREAMING_SNAKE_CASE : Optional[Any] = {} try: job_time.update({job['''name''']: extract_time_from_single_job(lowercase__ ) for job in result['''jobs''']} ) __SCREAMING_SNAKE_CASE : Optional[int] = math.ceil((result['''total_count'''] - 100) / 100 ) for i in range(lowercase__ ): __SCREAMING_SNAKE_CASE : Optional[int] = requests.get(url + F'''&page={i + 2}''' , headers=lowercase__ ).json() job_time.update({job['''name''']: extract_time_from_single_job(lowercase__ ) for job in result['''jobs''']} ) return job_time except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} if __name__ == "__main__": __lowerCAmelCase : int =argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') __lowerCAmelCase : Tuple =parser.parse_args() __lowerCAmelCase : Any =get_job_time(args.workflow_run_id) __lowerCAmelCase : int =dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(f"""{k}: {v["duration"]}""")
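The per-job duration computation above reduces to this self-contained snippet (assumes `python-dateutil` is installed):

import dateutil.parser as date_parser

start = date_parser.parse("2023-01-01T10:00:00Z")
end = date_parser.parse("2023-01-01T10:41:00Z")
duration_in_min = round((end - start).total_seconds() / 60.0)
print(duration_in_min)  # 41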
9
"""simple docstring""" def lowercase ( A_ )-> str: '''simple docstring''' if isinstance(A_ , A_ ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(A_ , A_ ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" a : Optional[Any] = False if num < 0: a : Tuple = True a : str = -num a : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(A_ ) for e in binary ) return "0b" + "".join(str(A_ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
40
0
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __A = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: __A = json.load(f) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Optional[int]) ->str: '''simple docstring''' return FSMTTokenizer.from_pretrained(UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : int) ->List[Any]: '''simple docstring''' lowerCamelCase__: Dict =FSMTForConditionalGeneration.from_pretrained(UpperCAmelCase_).to(UpperCAmelCase_) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["en-ru", 26.0], ["ru-en", 22.0], ["en-de", 22.0], ["de-en", 29.0], ]) @slow def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int) ->Tuple: '''simple docstring''' lowerCamelCase__: Tuple =F"""facebook/wmt19-{pair}""" lowerCamelCase__: str =self.get_tokenizer(UpperCAmelCase_) lowerCamelCase__: Optional[Any] =self.get_model(UpperCAmelCase_) lowerCamelCase__: Optional[Any] =bleu_data[pair]["src"] lowerCamelCase__: Optional[int] =bleu_data[pair]["tgt"] lowerCamelCase__: Union[str, Any] =tokenizer(UpperCAmelCase_ , return_tensors="pt" , truncation=UpperCAmelCase_ , padding="longest").to(UpperCAmelCase_) lowerCamelCase__: List[Any] =model.generate( input_ids=batch.input_ids , num_beams=8 , ) lowerCamelCase__: Union[str, Any] =tokenizer.batch_decode( UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_) lowerCamelCase__: str =calculate_bleu(UpperCAmelCase_ , UpperCAmelCase_) print(UpperCAmelCase_) self.assertGreaterEqual(scores["bleu"] , UpperCAmelCase_)
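Stripped of the BLEU bookkeeping, the generation path above looks like this in user code (assumes `transformers` is installed; downloads the en-ru checkpoint):

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-en-ru"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

batch = tokenizer(["Machine learning is great."], return_tensors="pt")
generated = model.generate(input_ids=batch.input_ids, num_beams=8)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))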
10
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]: '''simple docstring''' a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ ) a , a : int = [i[0] for i in r], [i[1] for i in r] a : Union[str, Any] = list(accumulate(A_ ) ) a : Optional[Any] = bisect(A_ , A_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
40
0
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState

warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """Wraps a learning-rate scheduler so it only advances when the wrapped optimizers actually stepped."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
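Where this wrapper sits in practice, as a hedged sketch: `accelerator.prepare` returns the scheduler wrapped in the class above, so `scheduler.step()` in the loop dispatches through it. Exact prepare semantics can vary across `accelerate` versions.

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2)

model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
for _ in range(4):
    loss = model(torch.randn(8, 4)).sum()
    accelerator.backward(loss)
    optimizer.step()
    scheduler.step()  # goes through AcceleratedScheduler.step above
    optimizer.zero_grad()
print(scheduler.get_last_lr())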
11
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowercase ( A_ , A_ , A_ = False )-> list[float]: '''simple docstring''' if radian_mode: return [magnitude * cos(A_ ), magnitude * sin(A_ )] return [magnitude * cos(radians(A_ ) ), magnitude * sin(radians(A_ ) )] def lowercase ( A_ , A_ , A_ = 10**-1 )-> bool: '''simple docstring''' a : NDArray[floataa] = cross(A_ , A_ ) a : float = sum(A_ ) return abs(A_ ) < eps if __name__ == "__main__": # Test to check if it works __lowercase = array( [ polar_force(7_18.4, 180 - 30), polar_force(8_79.54, 45), polar_force(100, -90), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __lowercase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
40
0
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    '''simple docstring'''
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str):
    '''simple docstring'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    '''simple docstring'''
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    '''simple docstring'''
    return round(tf * idf, 3)
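A worked pass through all four helpers on a three-document toy corpus: "this" occurs once in the first document and appears in two of the three documents, so idf = round(log10(3/2), 3) = 0.176.

corpus = "this is a cat\nthis is a dog\nthat is a bird"
tf = term_frequency("this", "this is a cat")  # 1
df, n = document_frequency("this", corpus)    # (2, 3)
idf = inverse_document_frequency(df, n)       # 0.176
print(tf_idf(tf, idf))                        # 0.176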
12
"""simple docstring""" def lowercase ( A_ , A_ )-> float: '''simple docstring''' if mass < 0: raise ValueError("The mass of a body cannot be negative" ) return 0.5 * mass * abs(A_ ) * abs(A_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
40
0
import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCAmelCase : List[Any] = None lowerCAmelCase : str = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Optional[Any] = { """vocab_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""", }, """tokenizer_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""", }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCAmelCase : str = { """t5-small""": 512, """t5-base""": 512, """t5-large""": 512, """t5-3b""": 512, """t5-11b""": 512, } class __lowercase ( UpperCAmelCase_ ): """simple docstring""" _UpperCAmelCase : Any = VOCAB_FILES_NAMES _UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : str = ['''input_ids''', '''attention_mask'''] _UpperCAmelCase : Optional[int] = TaTokenizer _UpperCAmelCase : List[int] = [] def __init__( self : Any , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : List[Any]="<unk>" , lowerCAmelCase__ : str="<pad>" , lowerCAmelCase__ : List[Any]=100 , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Tuple , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: SCREAMING_SNAKE_CASE_: Optional[int] = [F"<extra_id_{i}>" for i in range(lowerCAmelCase__)] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens SCREAMING_SNAKE_CASE_: List[str] = len(set(filter(lambda lowerCAmelCase__: bool("extra_id_" in str(lowerCAmelCase__)) , lowerCAmelCase__))) if extra_tokens != extra_ids: raise ValueError( F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids" " tokens") super().__init__( lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , extra_ids=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_: Dict = vocab_file SCREAMING_SNAKE_CASE_: Dict = False if not self.vocab_file else True SCREAMING_SNAKE_CASE_: List[str] = extra_ids @staticmethod def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any): if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: SCREAMING_SNAKE_CASE_: List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( "This tokenizer was incorrectly instantiated with a model max length of" F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" " behavior is kept to avoid breaking backwards compatibility when padding/encoding with" " `truncation is True`.\n- Be aware that you SHOULD NOT rely on" F" {pretrained_model_name_or_path} automatically truncating your input to" F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please" " instantiate this tokenizer with `model_max_length` set to your preferred value." , lowerCAmelCase__ , ) return max_model_length def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(lowerCAmelCase__): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return SCREAMING_SNAKE_CASE_: Tuple = os.path.join( lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__): copyfile(self.vocab_file , lowerCAmelCase__) logger.info(F"Copy vocab file to {out_vocab_file}") return (out_vocab_file,) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None): SCREAMING_SNAKE_CASE_: Optional[int] = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: SCREAMING_SNAKE_CASE_: Optional[int] = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None): SCREAMING_SNAKE_CASE_: List[Any] = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos) * [0] return len(token_ids_a + eos + token_ids_a + eos) * [0] def _SCREAMING_SNAKE_CASE ( self : List[str]): return list( set(filter(lambda lowerCAmelCase__: bool(re.search(R"<extra_id_\d+>" , lowerCAmelCase__)) is not None , self.additional_special_tokens))) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): return [self.convert_tokens_to_ids(lowerCAmelCase__) for token in self.get_sentinel_tokens()]
13
"""simple docstring""" import os import sys import unittest __lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowercase = os.path.join(git_repo_path, """src""", """diffusers""") class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = find_backend(" if not is_torch_available():") self.assertEqual(__UpperCAmelCase , "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") a : int = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx") def __snake_case ( self : Union[str, Any]): a : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , __UpperCAmelCase) self.assertIn("torch_and_transformers" , __UpperCAmelCase) self.assertIn("flax_and_transformers" , __UpperCAmelCase) self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"]) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"]) self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"]) def __snake_case ( self : Tuple): a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'") self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n") a : Dict = create_dummy_object("function" , "'torch'") self.assertEqual( __UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n") a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" a : int = create_dummy_object("FakeClass" , "'torch'") self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[str]): a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n 
requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
40
0
import os import pytest from transformers.dynamic_module_utils import get_imports _lowerCamelCase : Any = """ import os """ _lowerCamelCase : Optional[int] = """ def foo(): import os return False """ _lowerCamelCase : List[Any] = """ def foo(): def bar(): if True: import os return False return bar() """ _lowerCamelCase : List[Any] = """ import os try: import bar except ImportError: raise ValueError() """ _lowerCamelCase : Union[str, Any] = """ import os def foo(): try: import bar except ImportError: raise ValueError() """ _lowerCamelCase : List[Any] = """ import os try: import bar except (ImportError, AttributeError): raise ValueError() """ _lowerCamelCase : List[Any] = """ import os try: import bar except ImportError as e: raise ValueError() """ _lowerCamelCase : str = """ import os try: import bar except: raise ValueError() """ _lowerCamelCase : Optional[Any] = """ import os try: import bar import baz except ImportError: raise ValueError() """ _lowerCamelCase : Any = """ import os try: import bar import baz except ImportError: x = 1 raise ValueError() """ _lowerCamelCase : Dict = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int: """simple docstring""" A__ = os.path.join(lowercase_ , '''test_file.py''' ) with open(lowercase_ , '''w''' ) as _tmp_file: _tmp_file.write(lowercase_ ) A__ = get_imports(lowercase_ ) assert parsed_imports == ["os"]
14
"""simple docstring""" __lowercase = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": """pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
40
0
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """simple docstring"""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        """simple docstring"""
        return math.sin(10 * x)

    print('f(x) = sin(10 * x)')
    print('The length of the curve from x = -10 to x = 10 is:')
    i = 10
    while i <= 100000:
        print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
        i *= 10
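A sanity check on a curve whose length is known in closed form: f(x) = x from 0 to 1 has length sqrt(2), and each of the 10 linear segments contributes hypot(0.1, 0.1).

print(line_length(lambda x: x, 0, 1, 10))  # 1.4142..., i.e. sqrt(2)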
15
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, 
TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
0
"""simple docstring""" import torch from torch import nn class __A ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] ,_snake_case : Any ,_snake_case : str ,_snake_case : List[Any] ,_snake_case : str ,_snake_case : Optional[Any]=1 ,_snake_case : List[str]=False ) -> Optional[Any]: """simple docstring""" super().__init__() lowercase__ : Optional[Any] = n_token lowercase__ : List[str] = d_embed lowercase__ : int = d_proj lowercase__ : Union[str, Any] = cutoffs + [n_token] lowercase__ : Optional[Any] = [0] + self.cutoffs lowercase__ : Optional[Any] = div_val lowercase__ : Dict = self.cutoffs[0] lowercase__ : str = len(self.cutoffs ) - 1 lowercase__ : Optional[Any] = self.shortlist_size + self.n_clusters if self.n_clusters > 0: lowercase__ : List[Any] = nn.Parameter(torch.zeros(self.n_clusters ,self.d_embed ) ) lowercase__ : Any = nn.Parameter(torch.zeros(self.n_clusters ) ) lowercase__ : Dict = nn.ModuleList() lowercase__ : Optional[Any] = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(_snake_case ,_snake_case ) ) ) else: self.out_projs.append(_snake_case ) self.out_layers.append(nn.Linear(_snake_case ,_snake_case ) ) else: for i in range(len(self.cutoffs ) ): lowercase__ , lowercase__ : Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase__ : List[str] = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(_snake_case ,_snake_case ) ) ) self.out_layers.append(nn.Linear(_snake_case ,r_idx - l_idx ) ) lowercase__ : Union[str, Any] = keep_order def UpperCAmelCase ( self : List[Any] ,_snake_case : int ,_snake_case : Union[str, Any] ,_snake_case : int ,_snake_case : Optional[Any] ) -> Tuple: """simple docstring""" if proj is None: lowercase__ : List[Any] = nn.functional.linear(_snake_case ,_snake_case ,bias=_snake_case ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: lowercase__ : List[str] = nn.functional.linear(_snake_case ,proj.t().contiguous() ) lowercase__ : Tuple = nn.functional.linear(_snake_case ,_snake_case ,bias=_snake_case ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def UpperCAmelCase ( self : Dict ,_snake_case : List[str] ,_snake_case : Dict=None ,_snake_case : Dict=False ) -> Optional[int]: """simple docstring""" if labels is not None: # Shift so that tokens < n predict n lowercase__ : List[str] = hidden[..., :-1, :].contiguous() lowercase__ : List[str] = labels[..., 1:].contiguous() lowercase__ : str = hidden.view(-1 ,hidden.size(-1 ) ) lowercase__ : int = labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' ) else: lowercase__ : str = hidden.view(-1 ,hidden.size(-1 ) ) if self.n_clusters == 0: lowercase__ : int = self._compute_logit(_snake_case ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] ) if labels is not None: lowercase__ : Dict = labels != -100 lowercase__ : Union[str, Any] = torch.zeros_like(_snake_case ,dtype=hidden.dtype ,device=hidden.device ) lowercase__ : List[str] = ( -nn.functional.log_softmax(_snake_case ,dim=-1 )[mask].gather(1 ,labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: lowercase__ : str = nn.functional.log_softmax(_snake_case ,dim=-1 ) else: # construct weights and biases lowercase__ , lowercase__ : Dict = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: lowercase__ , lowercase__ 
: Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase__ : List[str] = self.out_layers[0].weight[l_idx:r_idx] lowercase__ : int = self.out_layers[0].bias[l_idx:r_idx] else: lowercase__ : Union[str, Any] = self.out_layers[i].weight lowercase__ : List[str] = self.out_layers[i].bias if i == 0: lowercase__ : int = torch.cat([weight_i, self.cluster_weight] ,dim=0 ) lowercase__ : Tuple = torch.cat([bias_i, self.cluster_bias] ,dim=0 ) weights.append(_snake_case ) biases.append(_snake_case ) lowercase__ , lowercase__ , lowercase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0] lowercase__ : Optional[int] = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case ) lowercase__ : Tuple = nn.functional.log_softmax(_snake_case ,dim=1 ) if labels is None: lowercase__ : Any = hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: lowercase__ : List[Any] = torch.zeros_like(_snake_case ,dtype=hidden.dtype ,device=hidden.device ) lowercase__ : Any = 0 lowercase__ : Optional[int] = [0] + self.cutoffs for i in range(len(_snake_case ) - 1 ): lowercase__ , lowercase__ : Any = cutoff_values[i], cutoff_values[i + 1] if labels is not None: lowercase__ : Dict = (labels >= l_idx) & (labels < r_idx) lowercase__ : Optional[int] = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue lowercase__ : Optional[int] = labels.index_select(0 ,_snake_case ) - l_idx lowercase__ : Tuple = head_logprob.index_select(0 ,_snake_case ) lowercase__ : List[Any] = hidden.index_select(0 ,_snake_case ) else: lowercase__ : int = hidden if i == 0: if labels is not None: lowercase__ : str = head_logprob_i.gather(1 ,target_i[:, None] ).squeeze(1 ) else: lowercase__ : Dict = head_logprob[:, : self.cutoffs[0]] else: lowercase__ , lowercase__ , lowercase__ : Optional[Any] = weights[i], biases[i], self.out_projs[i] lowercase__ : Optional[Any] = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case ) lowercase__ : Union[str, Any] = nn.functional.log_softmax(_snake_case ,dim=1 ) lowercase__ : Optional[int] = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: lowercase__ : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 ,target_i[:, None] ).squeeze(1 ) else: lowercase__ : List[str] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i lowercase__ : Optional[Any] = logprob_i if labels is not None: if (hasattr(self ,'''keep_order''' ) and self.keep_order) or keep_order: out.index_copy_(0 ,_snake_case ,-logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[str] ) -> int: """simple docstring""" if self.n_clusters == 0: lowercase__ : List[Any] = self._compute_logit(_snake_case ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] ) return nn.functional.log_softmax(_snake_case ,dim=-1 ) else: # construct weights and biases lowercase__ , lowercase__ : Optional[Any] = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: lowercase__ , lowercase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase__ : List[Any] = self.out_layers[0].weight[l_idx:r_idx] lowercase__ : List[Any] = self.out_layers[0].bias[l_idx:r_idx] else: lowercase__ : Optional[int] = self.out_layers[i].weight lowercase__ : int = self.out_layers[i].bias if i == 0: lowercase__ : str = torch.cat([weight_i, self.cluster_weight] ,dim=0 ) lowercase__ : Dict = torch.cat([bias_i, 
self.cluster_bias] ,dim=0 ) weights.append(_snake_case ) biases.append(_snake_case ) lowercase__ , lowercase__ , lowercase__ : List[str] = weights[0], biases[0], self.out_projs[0] lowercase__ : Optional[Any] = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case ) lowercase__ : Optional[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) ) lowercase__ : List[Any] = nn.functional.log_softmax(_snake_case ,dim=1 ) lowercase__ : Optional[Any] = [0] + self.cutoffs for i in range(len(_snake_case ) - 1 ): lowercase__ , lowercase__ : Union[str, Any] = cutoff_values[i], cutoff_values[i + 1] if i == 0: lowercase__ : Dict = head_logprob[:, : self.cutoffs[0]] else: lowercase__ , lowercase__ , lowercase__ : List[str] = weights[i], biases[i], self.out_projs[i] lowercase__ : Any = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case ) lowercase__ : Any = nn.functional.log_softmax(_snake_case ,dim=1 ) lowercase__ : Any = head_logprob[:, -i] + tail_logprob_i lowercase__ : str = logprob_i return out
16
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , __UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
40
0
"""simple docstring""" _a = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def _A ( UpperCamelCase_ : List[Any], UpperCamelCase_ : Optional[Any], UpperCamelCase_ : Tuple, UpperCamelCase_ : str) -> Any: '''simple docstring''' __lowercase = [False] * len(UpperCamelCase_) __lowercase = [s] __lowercase = True while queue: __lowercase = queue.pop(0) for ind in range(len(graph[u])): if visited[ind] is False and graph[u][ind] > 0: queue.append(UpperCamelCase_) __lowercase = True __lowercase = u return visited[t] def _A ( UpperCamelCase_ : int, UpperCamelCase_ : Tuple, UpperCamelCase_ : List[str]) -> Optional[int]: '''simple docstring''' __lowercase = [-1] * (len(UpperCamelCase_)) __lowercase = 0 __lowercase = [] __lowercase = [i[:] for i in graph] # Record original cut, copy. while bfs(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_): __lowercase = float("Inf") __lowercase = sink while s != source: # Find the minimum value in select path __lowercase = min(UpperCamelCase_, graph[parent[s]][s]) __lowercase = parent[s] max_flow += path_flow __lowercase = sink while v != source: __lowercase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow __lowercase = parent[v] for i in range(len(UpperCamelCase_)): for j in range(len(graph[0])): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j)) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
17
"""simple docstring""" from __future__ import annotations class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int = 0): a : Tuple = key def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Optional[Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : Any = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : str = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
40
0
import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets __lowerCamelCase : str = datasets.logging.get_logger(__name__) __lowerCamelCase : str = '''\ @inproceedings{bleurt, title={BLEURT: Learning Robust Metrics for Text Generation}, author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh}, booktitle={ACL}, year={2020}, url={https://arxiv.org/abs/2004.04696} } ''' __lowerCamelCase : Union[str, Any] = '''\ BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project\'s README at https://github.com/google-research/bleurt#readme for more information. ''' __lowerCamelCase : List[str] = ''' BLEURT score. Args: `predictions` (list of str): prediction/candidate sentences `references` (list of str): reference sentences `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None. Returns: \'scores\': List of scores. Examples: >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> bleurt = datasets.load_metric("bleurt") >>> results = bleurt.compute(predictions=predictions, references=references) >>> print([round(v, 2) for v in results["scores"]]) [1.03, 1.04] ''' __lowerCamelCase : str = { '''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''', '''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''', '''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''', '''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''', '''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''', '''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''', '''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''', '''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''', '''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''', '''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''', } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def __UpperCamelCase ( self : Any ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION,citation=_CITATION,homepage="https://github.com/google-research/bleurt",inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features( { "predictions": datasets.Value("string",id="sequence" ), "references": datasets.Value("string",id="sequence" ), } ),codebase_urls=["https://github.com/google-research/bleurt"],reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],) def __UpperCamelCase ( self : str,_A : Any ): """simple docstring""" if self.config_name == "default": logger.warning( "Using default BLEURT-Base checkpoint for sequence maximum length 128. " "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')." 
) SCREAMING_SNAKE_CASE_ : str = "bleurt-base-128" if self.config_name.lower() in CHECKPOINT_URLS: SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: SCREAMING_SNAKE_CASE_ : List[Any] = self.config_name.upper() else: raise KeyError( F'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' ) # download the model checkpoint specified by self.config_name and set up the scorer SCREAMING_SNAKE_CASE_ : Union[str, Any] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) SCREAMING_SNAKE_CASE_ : List[str] = score.BleurtScorer(os.path.join(_A,_A ) ) def __UpperCamelCase ( self : str,_A : List[Any],_A : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = self.scorer.score(references=_A,candidates=_A ) return {"scores": scores}
18
"""simple docstring""" import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def lowercase ( A_ )-> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def lowercase ( A_ )-> Tuple: '''simple docstring''' class _A : """simple docstring""" def __init__( self : str , __UpperCAmelCase : int): a : List[Any] = metric_id class _A : """simple docstring""" UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]] def __snake_case ( self : List[str]): return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any: '''simple docstring''' if "tmp_path" in args: a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ): func(*A_ )
40
0
import math


def insertion_sort(array, start=0, end=0):
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array, first_index, middle_index, last_index):
    # Pick the median of three probes as the quicksort pivot.
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('''Enter numbers separated by a comma : ''').strip()
    unsorted = [float(item) for item in user_input.split(''',''')]
    print(sort(unsorted))
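A quick check of the hybrid: at 16 elements the range is not above size_threshold, so this call falls straight through to insertion sort; longer inputs go through the quicksort path, switching to heapsort once the depth budget is spent.

print(sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]))
# [1.0-free ints, sorted: [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]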
19
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example __lowercase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowercase ( A_ )-> list[list[int]]: '''simple docstring''' a : str = [] for i in range(len(A_ ) ): a : str = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours a : Union[str, Any] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(A_ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(A_ ) - 1: neighbour_count += cells[i + 1][j] if i < len(A_ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. a : Tuple = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(A_ ) return next_generation def lowercase ( A_ , A_ )-> list[Image.Image]: '''simple docstring''' a : List[str] = [] for _ in range(A_ ): # Create output image a : str = Image.new("RGB" , (len(cells[0] ), len(A_ )) ) a : Union[str, Any] = img.load() # Save cells to image for x in range(len(A_ ) ): for y in range(len(cells[0] ) ): a : Optional[Any] = 255 - cells[y][x] * 255 a : str = (colour, colour, colour) # Save image images.append(A_ ) a : Tuple = new_generation(A_ ) return images if __name__ == "__main__": __lowercase = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
40
0
import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any: assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: lowercase : Union[str, Any] = tmp_path / """cache""" lowercase : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase : Any = SqlDatasetReader( """dataset""" , """sqlite:///""" + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read() _check_sql_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @require_sqlalchemy @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple: lowercase : Union[str, Any] = tmp_path / """cache""" lowercase : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} lowercase : str = features.copy() if features else default_expected_features lowercase : Optional[Any] = ( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase : Optional[int] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_sql_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ ) -> List[Any]: with contextlib.closing(sqlitea.connect(SCREAMING_SNAKE_CASE__ ) ) as con: lowercase : Optional[int] = con.cursor() cur.execute("""SELECT * FROM dataset""" ) for row in cur: yield row @require_sqlalchemy def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int: lowercase : Any = tmp_path / """cache""" lowercase : int = os.path.join(SCREAMING_SNAKE_CASE__ , """tmp.sql""" ) lowercase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ ).read() SqlDatasetWriter(SCREAMING_SNAKE_CASE__ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write() lowercase : List[str] = iter_sql_file(SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = iter_sql_file(SCREAMING_SNAKE_CASE__ ) for rowa, rowa in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): assert rowa == rowa @require_sqlalchemy def _snake_case( SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict: lowercase : Dict = tmp_path / """cache""" lowercase : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , """tmp.sql""" ) lowercase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ ).read() SqlDatasetWriter(SCREAMING_SNAKE_CASE__ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write() lowercase : Optional[int] = iter_sql_file(SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = iter_sql_file(SCREAMING_SNAKE_CASE__ ) for rowa, rowa in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): assert rowa == rowa @require_sqlalchemy def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]: lowercase : str = tmp_path / """cache""" lowercase : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE__ , """tmp.sql""" ) lowercase : Optional[Any] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ ).read() with pytest.raises(SCREAMING_SNAKE_CASE__ ): SqlDatasetWriter(SCREAMING_SNAKE_CASE__ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
20
"""simple docstring""" from itertools import permutations def lowercase ( A_ )-> bool: '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False a : Optional[int] = [7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase ( A_ = 10 )-> int: '''simple docstring''' return sum( int("".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
40
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE : List[Any] = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[Any] = ["XLNetTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[Any] = ["XLNetTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Optional[Any] = [ "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "XLNetForMultipleChoice", "XLNetForQuestionAnswering", "XLNetForQuestionAnsweringSimple", "XLNetForSequenceClassification", "XLNetForTokenClassification", "XLNetLMHeadModel", "XLNetModel", "XLNetPreTrainedModel", "load_tf_weights_in_xlnet", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : int = [ "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLNetForMultipleChoice", "TFXLNetForQuestionAnsweringSimple", "TFXLNetForSequenceClassification", "TFXLNetForTokenClassification", "TFXLNetLMHeadModel", "TFXLNetMainLayer", "TFXLNetModel", "TFXLNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Dict = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase : Optional[int] = False @property def __snake_case ( self : Optional[Any]): return 32 @property def __snake_case ( self : Dict): return 32 @property def __snake_case ( self : Dict): return self.time_input_dim @property def __snake_case ( self : Any): return self.time_input_dim * 4 @property def __snake_case ( self : str): return 100 @property def __snake_case ( self : str): torch.manual_seed(0) a : str = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } a : Dict = UNetaDConditionModel(**__UpperCAmelCase) return model @property def __snake_case ( self : str): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __snake_case ( self : Union[str, Any]): torch.manual_seed(0) a : Dict = VQModel(**self.dummy_movq_kwargs) return model def __snake_case ( self : Optional[Any]): a : Optional[Any] = self.dummy_unet a : int = self.dummy_movq a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , ) a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0): a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : 
List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( __UpperCAmelCase) # create hint a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) if str(__UpperCAmelCase).startswith("mps"): a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase) else: a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : str = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __snake_case ( self : Dict): a : str = "cpu" a : Tuple = self.get_dummy_components() a : Dict = self.pipeline_class(**__UpperCAmelCase) a : Optional[int] = pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase)) a : Any = output.images a : Any = pipe( **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0] a : Union[str, Any] = image[0, -3:, -3:, -1] a : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : Tuple = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[str]): a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0 a : str = hint.permute(2 , 0 , 1).unsqueeze(0) a : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(__UpperCAmelCase) a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa) a : int = pipeline.to(__UpperCAmelCase) pipeline.set_progress_bar_config(disable=__UpperCAmelCase) a : Tuple = "A robot, 4k photo" a : Any = torch.Generator(device="cuda").manual_seed(0) a , a : int = pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a : str = torch.Generator(device="cuda").manual_seed(0) a : Union[str, Any] = pipeline( image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) a : str = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
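The test above builds a full Kandinsky 2.2 controlnet pipeline out of tiny dummy components and compares a 3x3 corner of the output against a stored slice. One idiom worth isolating is the device-aware generator in get_dummy_inputs: MPS pipelines expect a CPU generator, while every other device gets a device-local one. A standalone sketch (the helper name is hypothetical):

import torch


def make_generator(device, seed=0):
    # torch.manual_seed returns the default CPU generator, which is what
    # diffusers pipelines expect when running on Apple MPS
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)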
40
0
'''simple docstring'''
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict) -> None:
    '''simple docstring'''
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_asdict_keeps_dataset_name(split_info: SplitInfo) -> None:
    '''simple docstring'''
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
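The two tests above round-trip a SplitDict through its YAML list form and check that asdict keeps the deprecated dataset_name field. A short usage sketch of the round trip; note that _to_yaml_list / _from_yaml_list are private datasets API and may change between releases:

from datasets.splits import SplitDict, SplitInfo

splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
yaml_list = splits._to_yaml_list()               # list of plain dicts, one per split
reloaded = SplitDict._from_yaml_list(yaml_list)
assert reloaded["train"].num_examples == 42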
22
"""simple docstring""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) def lowercase ( A_ )-> Dict: '''simple docstring''' a : str = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: a : Union[str, Any] = 128 elif "12-12" in model_name: a : List[Any] = 12 a : str = 12 elif "14-14" in model_name: a : List[Any] = 14 a : Optional[int] = 14 elif "16-16" in model_name: a : Any = 16 a : List[Any] = 16 else: raise ValueError("Model not supported" ) a : Optional[int] = "huggingface/label-files" if "speech-commands" in model_name: a : Optional[int] = 35 a : List[str] = "speech-commands-v2-id2label.json" else: a : Optional[Any] = 527 a : Tuple = "audioset-id2label.json" a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) ) a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()} a : Any = idalabel a : str = {v: k for k, v in idalabel.items()} return config def lowercase ( A_ )-> Tuple: '''simple docstring''' if "module.v" in name: a : Union[str, Any] = name.replace("module.v" , "audio_spectrogram_transformer" ) if "cls_token" in name: a : List[Any] = name.replace("cls_token" , "embeddings.cls_token" ) if "dist_token" in name: a : Union[str, Any] = name.replace("dist_token" , "embeddings.distillation_token" ) if "pos_embed" in name: a : str = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: a : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) # transformer blocks if "blocks" in name: a : Union[str, Any] = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: a : str = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: a : Tuple = name.replace("attn" , "attention.self" ) if "norm1" in name: a : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a : Union[str, Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: a : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" ) # classifier head if "module.mlp_head.0" in name: a : List[str] = name.replace("module.mlp_head.0" , "classifier.layernorm" ) if "module.mlp_head.1" in name: a : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" ) return name def lowercase ( A_ , A_ )-> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): a : str = orig_state_dict.pop(A_ ) if "qkv" in key: a : int = key.split("." 
) a : Optional[int] = int(key_split[3] ) a : int = config.hidden_size if "weight" in key: a : List[str] = val[:dim, :] a : Any = val[dim : dim * 2, :] a : int = val[-dim:, :] else: a : Optional[Any] = val[:dim] a : Union[str, Any] = val[dim : dim * 2] a : str = val[-dim:] else: a : str = val return orig_state_dict def lowercase ( A_ )-> Dict: '''simple docstring''' a : Union[str, Any] = [ "module.v.head.weight", "module.v.head.bias", "module.v.head_dist.weight", "module.v.head_dist.bias", ] for k in ignore_keys: state_dict.pop(A_ , A_ ) @torch.no_grad() def lowercase ( A_ , A_ , A_=False )-> Optional[int]: '''simple docstring''' a : Optional[int] = get_audio_spectrogram_transformer_config(A_ ) a : Dict = { "ast-finetuned-audioset-10-10-0.4593": ( "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.450": ( "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448": ( "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448-v2": ( "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" ), "ast-finetuned-audioset-12-12-0.447": ( "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" ), "ast-finetuned-audioset-14-14-0.443": ( "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" ), "ast-finetuned-audioset-16-16-0.442": ( "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" ), "ast-finetuned-speech-commands-v2": ( "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" ), } # load original state_dict a : Any = model_name_to_url[model_name] a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" ) # remove some keys remove_keys(A_ ) # rename some keys a : Union[str, Any] = convert_state_dict(A_ , A_ ) # load 🤗 model a : List[str] = ASTForAudioClassification(A_ ) model.eval() model.load_state_dict(A_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8 a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6 a : str = 1_024 if "speech-commands" not in model_name else 128 a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ ) if "speech-commands" in model_name: a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" ) a : int = dataset[0]["audio"]["array"] else: a : Tuple = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , ) a , a : Tuple = torchaudio.load(A_ ) a : Optional[Any] = waveform.squeeze().numpy() a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" ) # forward pass a : Optional[Any] = model(**A_ ) a : List[str] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] ) elif model_name == 
"ast-finetuned-audioset-12-12-0.447": a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] ) elif model_name == "ast-finetuned-speech-commands-v2": a : Union[str, Any] = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] ) else: raise ValueError("Unknown model name" ) if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ): raise ValueError("Logits don't match" ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(A_ ).mkdir(exist_ok=A_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(A_ ) if push_to_hub: print("Pushing model and feature extractor to the hub..." ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __lowercase = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
40
0
'''simple docstring''' import argparse import copy def snake_case_ ( _lowerCAmelCase : Optional[Any] ) -> Dict: UpperCAmelCase : List[Any] = {} with open(_lowerCAmelCase ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: UpperCAmelCase : Tuple = [] _list.append([line.split()[1], line.split()[2]] ) UpperCAmelCase : Union[str, Any] = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: UpperCAmelCase : List[str] = [] _list.append([line.split()[0], line.split()[2]] ) UpperCAmelCase : Dict = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> str: with open(_lowerCAmelCase ) as f: UpperCAmelCase : Dict = f.read(1 ) UpperCAmelCase : Union[str, Any] = start_node UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : Optional[int] = start_node UpperCAmelCase : Tuple = 0 while visiting not in first_solution: UpperCAmelCase : Dict = 10000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(_lowerCAmelCase ) and k[0] not in first_solution: UpperCAmelCase : Optional[int] = k[1] UpperCAmelCase : List[str] = k[0] first_solution.append(_lowerCAmelCase ) UpperCAmelCase : Optional[Any] = distance_of_first_solution + int(_lowerCAmelCase ) UpperCAmelCase : Optional[int] = best_node first_solution.append(_lowerCAmelCase ) UpperCAmelCase : str = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 UpperCAmelCase : str = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10000 ) return first_solution, distance_of_first_solution def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple ) -> List[Any]: UpperCAmelCase : int = [] for n in solution[1:-1]: UpperCAmelCase : List[Any] = solution.index(_lowerCAmelCase ) for kn in solution[1:-1]: UpperCAmelCase : List[Any] = solution.index(_lowerCAmelCase ) if n == kn: continue UpperCAmelCase : int = copy.deepcopy(_lowerCAmelCase ) UpperCAmelCase : int = kn UpperCAmelCase : Union[str, Any] = n UpperCAmelCase : Tuple = 0 for k in _tmp[:-1]: UpperCAmelCase : str = _tmp[_tmp.index(_lowerCAmelCase ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: UpperCAmelCase : Union[str, Any] = distance + int(i[1] ) _tmp.append(_lowerCAmelCase ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) UpperCAmelCase : Optional[int] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda _lowerCAmelCase : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def snake_case_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict ) -> str: UpperCAmelCase : List[Any] = 1 UpperCAmelCase : Dict = first_solution UpperCAmelCase : Dict = [] UpperCAmelCase : Optional[int] = distance_of_first_solution UpperCAmelCase : Union[str, Any] = solution while count <= iters: UpperCAmelCase : str = find_neighborhood(_lowerCAmelCase , _lowerCAmelCase ) UpperCAmelCase : Optional[Any] = 0 UpperCAmelCase : Tuple = neighborhood[index_of_best_solution] UpperCAmelCase : List[str] = len(_lowerCAmelCase ) - 1 UpperCAmelCase : Optional[Any] = False while not found: UpperCAmelCase : int = 0 while i < len(_lowerCAmelCase ): if best_solution[i] != solution[i]: UpperCAmelCase : Any = best_solution[i] 
UpperCAmelCase : Any = solution[i] break UpperCAmelCase : Any = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) UpperCAmelCase : int = True UpperCAmelCase : Union[str, Any] = best_solution[:-1] UpperCAmelCase : List[str] = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: UpperCAmelCase : Any = cost UpperCAmelCase : int = solution else: UpperCAmelCase : Tuple = index_of_best_solution + 1 UpperCAmelCase : str = neighborhood[index_of_best_solution] if len(_lowerCAmelCase ) >= size: tabu_list.pop(0 ) UpperCAmelCase : Optional[int] = count + 1 return best_solution_ever, best_cost def snake_case_ ( _lowerCAmelCase : Union[str, Any]=None ) -> Tuple: UpperCAmelCase : Optional[int] = generate_neighbours(args.File ) UpperCAmelCase , UpperCAmelCase : Any = generate_first_solution( args.File , _lowerCAmelCase ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] = tabu_search( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.Iterations , args.Size , ) print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" ) if __name__ == "__main__": UpperCamelCase__: List[Any] = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
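The driver above wires first-solution construction, neighborhood generation, and the tabu loop together (note the obfuscation renames every top-level function to snake_case_, yet the final line still calls main(parser.parse_args()), so the listing only runs once the driver gets that name back). The core tabu discipline, kept in isolation: remember the last `size` exchanges and refuse to repeat any of them in either direction. A minimal sketch:

def is_tabu(move, tabu_list):
    a, b = move
    return [a, b] in tabu_list or [b, a] in tabu_list


def record_move(move, tabu_list, size):
    tabu_list.append(list(move))
    if len(tabu_list) > size:  # FIFO eviction, same as tabu_list.pop(0) above
        tabu_list.pop(0)


tabu = []
record_move(("a", "b"), tabu, size=2)
assert is_tabu(("b", "a"), tabu)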
23
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase = { """configuration_rag""": ["""RagConfig"""], """retrieval_rag""": ["""RagRetriever"""], """tokenization_rag""": ["""RagTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """RagModel""", """RagPreTrainedModel""", """RagSequenceForGeneration""", """RagTokenForGeneration""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TFRagModel""", """TFRagPreTrainedModel""", """TFRagSequenceForGeneration""", """TFRagTokenForGeneration""", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
0
import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def lowerCamelCase__ ( ) -> Tuple: __snake_case = torch.nn.Linear(2 , 4 ) __snake_case = torch.optim.AdamW(model.parameters() , lr=1.0 ) __snake_case = torch.optim.lr_scheduler.OneCycleLR(snake_case_ , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) __snake_case = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) __snake_case = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def lowerCamelCase__ ( snake_case_ : Any ) -> Any: return (model.weight.abs().sum() + model.bias.abs().sum()).item() def lowerCamelCase__ ( snake_case_ : Union[str, Any] ) -> Union[str, Any]: __snake_case = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(snake_case_ ) class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): @require_cuda def a (self : Tuple ): """simple docstring""" __snake_case = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(a__ ): __snake_case = Accelerator(cpu=a__ ) def a (self : Tuple ): """simple docstring""" __snake_case = Accelerator() __snake_case = GradientState() assert state.num_steps == 1 __snake_case = 4 assert state.num_steps == 4 assert state.sync_gradients is True __snake_case = False assert state.sync_gradients is False GradientState._reset_state() def a (self : List[str] ): """simple docstring""" __snake_case = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) = accelerator.prepare(a__ , a__ , a__ , a__ , a__ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def a (self : Dict ): """simple docstring""" __snake_case = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components() accelerator.prepare(a__ , a__ , a__ , a__ , a__ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def a (self : str ): """simple docstring""" PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*a__ : List[Any] , **a__ : List[Any] ): pass with patch('''torch.cuda.set_device''' , a__ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ): __snake_case = Accelerator() self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' ) def a (self : Union[str, Any] ): """simple docstring""" __snake_case = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = 
create_components() accelerator.prepare(a__ , a__ , a__ , a__ , a__ ) __snake_case = get_signature(a__ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a__ ) # make sure random weights don't match load_random_weights(a__ ) self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1E-3 ) # make sure loaded weights match accelerator.load_state(a__ ) self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1E-3 ) def a (self : int ): """simple docstring""" __snake_case = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components() accelerator.prepare(a__ , a__ , a__ , a__ , a__ ) __snake_case = get_signature(a__ ) # saving hook def save_config(a__ : Any , a__ : Any , a__ : Optional[int] ): __snake_case = {'''class_name''': models[0].__class__.__name__} with open(os.path.join(a__ , '''data.json''' ) , '''w''' ) as f: json.dump(a__ , a__ ) # loading hook def load_config(a__ : str , a__ : Optional[Any] ): with open(os.path.join(a__ , '''data.json''' ) , '''r''' ) as f: __snake_case = json.load(a__ ) __snake_case = config['''class_name'''] __snake_case = accelerator.register_save_state_pre_hook(a__ ) __snake_case = accelerator.register_load_state_pre_hook(a__ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a__ ) # make sure random weights don't match with hooks load_random_weights(a__ ) self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case = '''random''' # make sure loaded weights match with hooks accelerator.load_state(a__ ) self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1E-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a__ ) # make sure random weights don't match with hooks removed load_random_weights(a__ ) self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case = '''random''' # make sure loaded weights match with hooks removed accelerator.load_state(a__ ) self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1E-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def a (self : List[str] ): """simple docstring""" __snake_case = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components() __snake_case = None # This should work __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare( a__ , a__ , a__ , a__ , a__ , a__ ) self.assertTrue(dummy_obj is None ) def a (self : Union[str, Any] ): """simple docstring""" __snake_case = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components() __snake_case = [1, 2, 3] # This should work __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare( a__ , a__ , a__ , a__ , a__ , a__ ) self.assertEqual( getattr(a__ , '''_is_accelerate_prepared''' , a__ ) , a__ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , ) self.assertEqual( getattr(a__ , '''_is_accelerate_prepared''' , a__ ) , a__ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a__ , 
'''_is_accelerate_prepared''' , a__ ) , a__ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a__ , '''_is_accelerate_prepared''' , a__ ) , a__ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a__ , '''_is_accelerate_prepared''' , a__ ) , a__ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a__ , '''_is_accelerate_prepared''' , a__ ) , a__ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) @slow @require_bnb def a (self : Optional[Any] ): """simple docstring""" from transformers import AutoModelForCausalLM __snake_case = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a__ , device_map={'''''': 0} , ) __snake_case = Accelerator() # This should work __snake_case = accelerator.prepare(a__ ) @slow @require_bnb def a (self : Any ): """simple docstring""" from transformers import AutoModelForCausalLM __snake_case = Accelerator() with init_empty_weights(): __snake_case = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case = infer_auto_device_map(a__ ) __snake_case = '''cpu''' __snake_case = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , device_map=a__ , load_in_abit=a__ , llm_inta_enable_fpaa_cpu_offload=a__ ) # This should not work and get value error with self.assertRaises(a__ ): __snake_case = accelerator.prepare(a__ ) @slow @require_bnb @require_multi_gpu def a (self : int ): """simple docstring""" from transformers import AutoModelForCausalLM __snake_case = {'''distributed_type''': DistributedType.MULTI_GPU} with init_empty_weights(): __snake_case = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case = infer_auto_device_map(a__ ) __snake_case = 1 __snake_case = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a__ , device_map=a__ , ) __snake_case = Accelerator() # This should not work and get value error with self.assertRaises(a__ ): __snake_case = accelerator.prepare(a__ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def a (self : int ): """simple docstring""" from transformers import AutoModelForCausalLM with init_empty_weights(): __snake_case = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) __snake_case = infer_auto_device_map(a__ ) __snake_case = 1 __snake_case = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a__ , device_map=a__ , ) __snake_case = Accelerator() # This should work __snake_case = accelerator.prepare(a__ ) @require_cuda def a (self : str ): """simple docstring""" __snake_case = torch.nn.Linear(10 , 10 ) __snake_case = torch.optim.SGD(model.parameters() , lr=0.0_1 ) __snake_case = Accelerator(cpu=a__ ) __snake_case = accelerator.prepare(a__ )
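The save/load tests above exercise Accelerator.save_state / load_state, including the pre-hooks. A condensed sketch of the same checkpoint round trip on a CPU-only setup, using only the public API:

import tempfile

import torch
from accelerate import Accelerator

accelerator = Accelerator(cpu=True)
model = accelerator.prepare(torch.nn.Linear(2, 4))
with tempfile.TemporaryDirectory() as ckpt_dir:
    accelerator.save_state(ckpt_dir)
    torch.nn.init.zeros_(model.weight)  # perturb the weights
    accelerator.load_state(ckpt_dir)    # restores the saved values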
24
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _a ,_a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = StableDiffusionInpaintPipeline UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase : Union[str, Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase : int = frozenset([] ) def __snake_case ( self : Dict): torch.manual_seed(0) a : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , ) a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase) torch.manual_seed(0) a : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) a : Any = CLIPTextModel(__UpperCAmelCase) a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0] a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64)) a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64)) if str(__UpperCAmelCase).startswith("mps"): a : Tuple = torch.manual_seed(__UpperCAmelCase) else: a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __snake_case ( self : List[str]): a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent 
torch.Generator a : Tuple = self.get_dummy_components() a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase) a : int = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Any = self.get_dummy_inputs(__UpperCAmelCase) a : Optional[int] = sd_pipe(**__UpperCAmelCase).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : int = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __snake_case ( self : str): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Dict): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy") a : Tuple = "stabilityai/stable-diffusion-2-inpainting" a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Any = "Face of a yellow cat, high resolution, sitting on a park bench" a : str = torch.manual_seed(0) a : Union[str, Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def __snake_case ( self : Any): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Any = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Dict = torch.manual_seed(0) a : List[Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : Optional[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def __snake_case ( self : int): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[int] = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler") a : int = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Optional[int] = torch.manual_seed(0) a : str = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="np" , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
40
0
"""simple docstring""" from __future__ import annotations class lowerCAmelCase_ : """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = TypeError( """Matrices must be formed from a list of zero or more lists containing at """ """least one and the same number of values, each of which must be of type """ """int or float.""" ) if len(SCREAMING_SNAKE_CASE__ ) != 0: SCREAMING_SNAKE_CASE__ : Optional[int] = len(rows[0] ) if cols == 0: raise error for row in rows: if len(SCREAMING_SNAKE_CASE__ ) != cols: raise error for value in row: if not isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ): raise error SCREAMING_SNAKE_CASE__ : int = rows else: SCREAMING_SNAKE_CASE__ : List[str] = [] def __magic_name__ (self ) -> list[list[int]]: """simple docstring""" return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def __magic_name__ (self ) -> int: """simple docstring""" return len(self.rows ) @property def __magic_name__ (self ) -> int: """simple docstring""" return len(self.rows[0] ) @property def __magic_name__ (self ) -> tuple[int, int]: """simple docstring""" return (self.num_rows, self.num_columns) @property def __magic_name__ (self ) -> bool: """simple docstring""" return self.order[0] == self.order[1] def __magic_name__ (self ) -> Matrix: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> int: """simple docstring""" if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def __magic_name__ (self ) -> bool: """simple docstring""" return bool(self.determinant() ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(SCREAMING_SNAKE_CASE__ ).determinant() def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" if (row + column) % 2 == 0: return self.get_minor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return -1 * self.get_minor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Matrix: """simple docstring""" return Matrix( [ [self.get_minor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def __magic_name__ (self ) -> Matrix: """simple docstring""" return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def __magic_name__ (self ) -> Matrix: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Matrix: """simple docstring""" 
SCREAMING_SNAKE_CASE__ : Optional[int] = self.determinant() if not determinant: raise TypeError("""Only matrices with a non-zero determinant have an inverse""" ) return self.adjugate() * (1 / determinant) def __repr__(self ) -> str: """simple docstring""" return str(self.rows ) def __str__(self ) -> str: """simple docstring""" if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ """[""" + """. """.join([str(SCREAMING_SNAKE_CASE__ ) for value in row] ) + """.]""" for row in self.rows ] ) + "]" ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = TypeError("""Row must be a list containing all ints and/or floats""" ) if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise type_error for value in row: if not isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ): raise type_error if len(SCREAMING_SNAKE_CASE__ ) != self.num_columns: raise ValueError( """Row must be equal in length to the other rows in the matrix""" ) if position is None: self.rows.append(SCREAMING_SNAKE_CASE__ ) else: SCREAMING_SNAKE_CASE__ : Any = self.rows[0:position] + [row] + self.rows[position:] def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = TypeError( """Column must be a list containing all ints and/or floats""" ) if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise type_error for value in column: if not isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ): raise type_error if len(SCREAMING_SNAKE_CASE__ ) != self.num_rows: raise ValueError( """Column must be equal in length to the other columns in the matrix""" ) if position is None: SCREAMING_SNAKE_CASE__ : List[str] = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: SCREAMING_SNAKE_CASE__ : int = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__(self , SCREAMING_SNAKE_CASE__ ) -> bool: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return NotImplemented return self.rows == other.rows def __ne__(self , SCREAMING_SNAKE_CASE__ ) -> bool: """simple docstring""" return not self == other def __neg__(self ) -> Matrix: """simple docstring""" return self * -1 def __add__(self , SCREAMING_SNAKE_CASE__ ) -> Matrix: """simple docstring""" if self.order != other.order: raise ValueError("""Addition requires matrices of the same order""" ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__(self , SCREAMING_SNAKE_CASE__ ) -> Matrix: """simple docstring""" if self.order != other.order: raise ValueError("""Subtraction requires matrices of the same order""" ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__(self , SCREAMING_SNAKE_CASE__ ) -> Matrix: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if self.num_columns != other.num_rows: raise ValueError( """The number of columns in the first matrix must """ """be equal to the number of rows in the second""" ) return Matrix( [ 
[Matrix.dot_product(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( """A Matrix can only be multiplied by an int, float, or another matrix""" ) def __pow__(self , SCREAMING_SNAKE_CASE__ ) -> Matrix: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise TypeError("""A Matrix can only be raised to the power of an int""" ) if not self.is_square: raise ValueError("""Only square matrices can be raised to a power""" ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( """Only invertable matrices can be raised to a negative power""" ) SCREAMING_SNAKE_CASE__ : Dict = self for _ in range(other - 1 ): result *= self return result @classmethod def __magic_name__ (cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" return sum(row[i] * column[i] for i in range(len(SCREAMING_SNAKE_CASE__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
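The class above implements determinants by cofactor expansion along the first row, with minors, cofactors, adjugates, and an inverse built on top. As printed it is not runnable, since the obfuscation names every method __magic_name__; the two conventions it encodes can still be shown standalone:

# 2x2 base case of the cofactor expansion, and the checkerboard sign rule
# applied by the get-cofactor step above
def det2(rows):
    return rows[0][0] * rows[1][1] - rows[0][1] * rows[1][0]


def cofactor_sign(row, column):
    return 1 if (row + column) % 2 == 0 else -1


assert det2([[2, 1], [1, 2]]) == 3
assert cofactor_sign(0, 1) == -1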
25
"""simple docstring""" def lowercase ( A_ )-> bool: '''simple docstring''' if not all(x.isalpha() for x in string ): raise ValueError("String must only contain alphabetic characters." ) a : Tuple = sorted(string.lower() ) return len(A_ ) == len(set(A_ ) ) if __name__ == "__main__": __lowercase = input("""Enter a string """).strip() __lowercase = is_isogram(input_str) print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
40
0
def z_function(input_str):
    z_result = [0 for i in range(len(input_str))]
    # initialize the interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when the current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i, z_result, s):
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern, input_str):
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is greater than the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
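z_result[i] is the length of the longest prefix of the string that matches the suffix starting at i, and find_pattern counts occurrences by Z-scanning pattern + input_str. Two small checks:

assert z_function("abab") == [0, 0, 2, 0]  # "ab" recurs at index 2
assert find_pattern("ab", "abab") == 2     # two occurrences of the pattern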
26
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __lowercase = datasets.utils.logging.get_logger(__name__) @dataclass class _A ( datasets.BuilderConfig ): """simple docstring""" UpperCAmelCase : int = 1_0_0_0_0 UpperCAmelCase : Optional[List[str]] = None UpperCAmelCase : Optional[datasets.Features] = None class _A ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCAmelCase : str = ParquetConfig def __snake_case ( self : Tuple): return datasets.DatasetInfo(features=self.config.features) def __snake_case ( self : List[Any] , __UpperCAmelCase : str): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''') a : str = dl_manager.download_and_extract(self.config.data_files) if isinstance(__UpperCAmelCase , (str, list, tuple)): a : Dict = data_files if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : str = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})] a : Dict = [] for split_name, files in data_files.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__UpperCAmelCase): with open(__UpperCAmelCase , "rb") as f: a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase)) break splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files})) return splits def __snake_case ( self : List[str] , __UpperCAmelCase : pa.Table): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema) return pa_table def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int): a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema) != sorted(self.config.columns): raise ValueError( f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''') for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)): with open(__UpperCAmelCase , "rb") as f: a : Tuple = pq.ParquetFile(__UpperCAmelCase) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)): a : Optional[Any] = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase) except ValueError as e: logger.error(f'''Failed to read file \'{file}\' with error 
{type(__UpperCAmelCase)}: {e}''') raise
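The builder above streams each parquet file in record batches and casts every batch to the declared features. The pyarrow iteration it relies on can be exercised standalone (the local file name is arbitrary):

import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"idx": list(range(10)), "text": [f"row {i}" for i in range(10)]})
pq.write_table(table, "sample.parquet")

parquet_file = pq.ParquetFile("sample.parquet")
for batch in parquet_file.iter_batches(batch_size=4, columns=["idx"]):
    chunk = pa.Table.from_batches([batch])  # same conversion as the builder's generator
    print(chunk.num_rows)                   # prints 4, 4, 2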
40
0
'''simple docstring''' from __future__ import annotations from math import gcd def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int = 2 , _SCREAMING_SNAKE_CASE : int = 1 , _SCREAMING_SNAKE_CASE : int = 3 , ): # A value less than 2 can cause an infinite loop in the algorithm. if num < 2: raise ValueError('The input value cannot be less than 2' ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ) -> int: return (pow(_SCREAMING_SNAKE_CASE , 2 ) + step) % modulus for _ in range(_SCREAMING_SNAKE_CASE ): # These track the position within the cycle detection logic. __a : int = seed __a : Tuple = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. __a : List[str] = rand_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Optional[int] = rand_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Optional[Any] = rand_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. __a : List[str] = gcd(hare - tortoise , _SCREAMING_SNAKE_CASE ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. __a : List[str] = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. 
# We were unlucky or ``num`` itself is actually prime. return None if __name__ == "__main__": import argparse __lowercase : Any = argparse.ArgumentParser() parser.add_argument( 'num', type=int, help='The value to find a divisor of', ) parser.add_argument( '--attempts', type=int, default=3, help='The number of attempts before giving up', ) __lowercase : List[str] = parser.parse_args() __lowercase : List[str] = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(f'''{args.num} is probably prime''') else: __lowercase : List[str] = args.num // divisor print(f'''{args.num} = {divisor} * {quotient}''')
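# The local assignment targets in the dump above were mangled (everything
# became ``__a``), so here is a compact, self-contained restatement of the
# same tortoise-and-hare loop under its usual variable names. The seed and
# step defaults are illustrative, and one call covers a single attempt.
from math import gcd
from typing import Optional

def pollard_rho_once(num: int, seed: int = 2, step: int = 1) -> Optional[int]:
    def rand_fn(x: int) -> int:  # the suggested f(x) = x**2 + C
        return (x * x + step) % num

    tortoise = hare = seed
    while True:
        tortoise = rand_fn(tortoise)
        hare = rand_fn(rand_fn(hare))  # the hare moves two steps per iteration
        divisor = gcd(hare - tortoise, num)
        if divisor == num:
            return None  # this attempt failed; retry with a new seed/step
        if divisor > 1:
            return divisor  # a nontrivial factor of ``num``

print(pollard_rho_once(4171))  # 4171 = 43 * 97: expect 43 or 97 (or None if the attempt degenerates)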
27
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json""" ), } class _A ( _a ): """simple docstring""" UpperCAmelCase : int = """dpr""" def __init__( self : List[Any] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : int = 0 , **__UpperCAmelCase : Tuple , ): super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase) a : List[Any] = vocab_size a : Optional[Any] = hidden_size a : Union[str, Any] = num_hidden_layers a : Dict = num_attention_heads a : int = hidden_act a : Any = intermediate_size a : Any = hidden_dropout_prob a : Dict = attention_probs_dropout_prob a : Any = max_position_embeddings a : Union[str, Any] = type_vocab_size a : Optional[Any] = initializer_range a : Dict = layer_norm_eps a : int = projection_dim a : str = position_embedding_type
40
0
'''simple docstring''' import requests from bs4 import BeautifulSoup def stock_price ( symbol = "AAPL" ) -> str: """simple docstring""" url = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}""" soup = BeautifulSoup(requests.get(url ).text , 'html.parser' ) class_ = 'My(6px) Pos(r) smartphone_Mt(6px)' return soup.find('div' , class_=class_ ).find('span' ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
28
"""simple docstring""" class _A : """simple docstring""" def __init__( self : int , __UpperCAmelCase : int): a : Tuple = size a : Dict = [0] * size a : Optional[int] = [0] * size @staticmethod def __snake_case ( __UpperCAmelCase : int): return index | (index + 1) @staticmethod def __snake_case ( __UpperCAmelCase : int): return (index & (index + 1)) - 1 def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int): a : Union[str, Any] = value while index < self.size: a : Dict = self.get_prev(__UpperCAmelCase) + 1 if current_left_border == index: a : Optional[int] = value else: a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = self.get_next(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): right -= 1 # Because of right is exclusive a : List[str] = 0 while left <= right: a : Dict = self.get_prev(__UpperCAmelCase) if left <= current_left: a : Optional[int] = max(__UpperCAmelCase , self.tree[right]) a : Optional[Any] = current_left else: a : List[str] = max(__UpperCAmelCase , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
40
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def lowercase__ ( __snake_case : str ): '''simple docstring''' UpperCAmelCase_ : List[Any] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: UpperCAmelCase_ : Tuple = 192 UpperCAmelCase_ : str = 768 UpperCAmelCase_ : List[Any] = 12 UpperCAmelCase_ : Optional[int] = 3 UpperCAmelCase_ : Optional[Any] = [800, 1_333] UpperCAmelCase_ : Dict = False elif yolos_name == "yolos_s_dWr": UpperCAmelCase_ : Tuple = 330 UpperCAmelCase_ : List[str] = 14 UpperCAmelCase_ : Tuple = 6 UpperCAmelCase_ : List[str] = 1_320 elif "yolos_s" in yolos_name: UpperCAmelCase_ : Any = 384 UpperCAmelCase_ : Union[str, Any] = 1_536 UpperCAmelCase_ : List[str] = 12 UpperCAmelCase_ : Any = 6 elif "yolos_b" in yolos_name: UpperCAmelCase_ : Optional[int] = [800, 1_344] UpperCAmelCase_ : str = 91 UpperCAmelCase_ : Optional[Any] = 'huggingface/label-files' UpperCAmelCase_ : Any = 'coco-detection-id2label.json' UpperCAmelCase_ : Tuple = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='dataset' ) , 'r' ) ) UpperCAmelCase_ : Optional[Any] = {int(__snake_case ): v for k, v in idalabel.items()} UpperCAmelCase_ : Optional[Any] = idalabel UpperCAmelCase_ : List[Any] = {v: k for k, v in idalabel.items()} return config def lowercase__ ( __snake_case : dict , __snake_case : YolosConfig , __snake_case : bool = False ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase_ : Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) UpperCAmelCase_ : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ : Tuple = in_proj_weight[: config.hidden_size, :] UpperCAmelCase_ : Optional[int] = in_proj_bias[: config.hidden_size] UpperCAmelCase_ : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase_ : Optional[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase_ : List[str] = in_proj_weight[-config.hidden_size :, :] UpperCAmelCase_ : str = in_proj_bias[-config.hidden_size :] def lowercase__ ( __snake_case : str ): '''simple docstring''' if "backbone" in name: UpperCAmelCase_ : List[str] = name.replace('backbone' , 'vit' ) if "cls_token" in name: UpperCAmelCase_ : List[Any] = name.replace('cls_token' , 'embeddings.cls_token' ) if "det_token" in name: UpperCAmelCase_ : Dict = name.replace('det_token' , 'embeddings.detection_tokens' ) if "mid_pos_embed" in name: UpperCAmelCase_ : Optional[int] = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' ) if "pos_embed" in name: UpperCAmelCase_ : List[str] = name.replace('pos_embed' , 'embeddings.position_embeddings' ) if "patch_embed.proj" in name: UpperCAmelCase_ : Any = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "blocks" in name: UpperCAmelCase_ : List[str] = name.replace('blocks' , 'encoder.layer' ) if "attn.proj" in name: UpperCAmelCase_ : List[str] = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: UpperCAmelCase_ : Dict = name.replace('attn' , 'attention.self' ) if "norm1" in 
name: UpperCAmelCase_ : List[Any] = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: UpperCAmelCase_ : List[Any] = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: UpperCAmelCase_ : List[Any] = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: UpperCAmelCase_ : str = name.replace('mlp.fc2' , 'output.dense' ) if "class_embed" in name: UpperCAmelCase_ : Any = name.replace('class_embed' , 'class_labels_classifier' ) if "bbox_embed" in name: UpperCAmelCase_ : Dict = name.replace('bbox_embed' , 'bbox_predictor' ) if "vit.norm" in name: UpperCAmelCase_ : Any = name.replace('vit.norm' , 'vit.layernorm' ) return name def lowercase__ ( __snake_case : dict , __snake_case : YolosForObjectDetection ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase_ : List[str] = orig_state_dict.pop(__snake_case ) if "qkv" in key: UpperCAmelCase_ : Any = key.split('.' ) UpperCAmelCase_ : Optional[int] = int(key_split[2] ) UpperCAmelCase_ : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: UpperCAmelCase_ : str = val[:dim, :] UpperCAmelCase_ : str = val[ dim : dim * 2, : ] UpperCAmelCase_ : int = val[-dim:, :] else: UpperCAmelCase_ : List[Any] = val[:dim] UpperCAmelCase_ : Optional[Any] = val[dim : dim * 2] UpperCAmelCase_ : List[str] = val[-dim:] else: UpperCAmelCase_ : List[str] = val return orig_state_dict def lowercase__ ( ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCAmelCase_ : Any = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ) return im @torch.no_grad() def lowercase__ ( __snake_case : str , __snake_case : str , __snake_case : str , __snake_case : bool = False ): '''simple docstring''' UpperCAmelCase_ : str = get_yolos_config(__snake_case ) # load original state_dict UpperCAmelCase_ : Tuple = torch.load(__snake_case , map_location='cpu' )['model'] # load 🤗 model UpperCAmelCase_ : Optional[Any] = YolosForObjectDetection(__snake_case ) model.eval() UpperCAmelCase_ : List[str] = convert_state_dict(__snake_case , __snake_case ) model.load_state_dict(__snake_case ) # Check outputs on an image, prepared by YolosImageProcessor UpperCAmelCase_ : int = 800 if yolos_name != 'yolos_ti' else 512 UpperCAmelCase_ : str = YolosImageProcessor(format='coco_detection' , size=__snake_case ) UpperCAmelCase_ : Optional[Any] = image_processor(images=prepare_img() , return_tensors='pt' ) UpperCAmelCase_ : Optional[Any] = model(**__snake_case ) UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.logits, outputs.pred_boxes UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None if yolos_name == "yolos_ti": UpperCAmelCase_ : Optional[Any] = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) UpperCAmelCase_ : Union[str, Any] = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": UpperCAmelCase_ : Any = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) UpperCAmelCase_ : Dict = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": UpperCAmelCase_ : str = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) UpperCAmelCase_ : Optional[Any] = torch.tensor( [[0.7614, 0.2316, 
0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": UpperCAmelCase_ : int = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) UpperCAmelCase_ : Union[str, Any] = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": UpperCAmelCase_ : Optional[int] = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) UpperCAmelCase_ : str = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(F"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __snake_case , atol=1E-4 ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__snake_case ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__snake_case ) if push_to_hub: UpperCAmelCase_ : List[str] = { 'yolos_ti': 'yolos-tiny', 'yolos_s_200_pre': 'yolos-small', 'yolos_s_300_pre': 'yolos-small-300', 'yolos_s_dWr': 'yolos-small-dwr', 'yolos_base': 'yolos-base', } print('Pushing to the hub...' ) UpperCAmelCase_ : str = model_mapping[yolos_name] image_processor.push_to_hub(__snake_case , organization='hustvl' ) model.push_to_hub(__snake_case , organization='hustvl' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--yolos_name', default='yolos_s_200_pre', type=str, help=( 'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',' ' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.' ), ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __UpperCAmelCase = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
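# read_in_q_k_v above slices the fused (3 * hidden, hidden) qkv projection
# from the original checkpoint into separate query/key/value blocks. A
# standalone sketch of that split, with an illustrative hidden size rather
# than a real checkpoint:
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
value_w = in_proj_weight[-hidden_size:, :]
# Stacking the three blocks back together must reproduce the fused matrix.
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), in_proj_weight)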
29
"""simple docstring""" import unittest from knapsack import knapsack as k class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[Any]): a : str = 0 a : Optional[int] = [0] a : Union[str, Any] = [0] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) a : List[str] = [60] a : str = [10] a : Optional[int] = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) def __snake_case ( self : Optional[int]): a : Any = 3 a : str = [1, 2, 3] a : Tuple = [3, 2, 1] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5) def __snake_case ( self : Tuple): a : int = 50 a : List[Any] = [60, 100, 120] a : Optional[int] = [10, 20, 30] a : str = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220) if __name__ == "__main__": unittest.main()
40
0
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple: return f'''gaussian_noise_s={seed}_shape={'_'.join([str(SCREAMING_SNAKE_CASE_ ) for s in shape] )}.npy''' def _lowercase ( self : List[str] ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=(4, 4, 6_4, 6_4) , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ) -> List[str]: lowercase_ = jnp.bfloataa if fpaa else jnp.floataa lowercase_ = jnp.array(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , dtype=SCREAMING_SNAKE_CASE_ ) return image def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Any=False , SCREAMING_SNAKE_CASE_ : Any="CompVis/stable-diffusion-v1-4" ) -> str: lowercase_ = jnp.bfloataa if fpaa else jnp.floataa lowercase_ = '''bf16''' if fpaa else None lowercase_ , lowercase_ = FlaxUNetaDConditionModel.from_pretrained( SCREAMING_SNAKE_CASE_ , subfolder='''unet''' , dtype=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ ) return model, params def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any=0 , SCREAMING_SNAKE_CASE_ : Dict=(4, 7_7, 7_6_8) , SCREAMING_SNAKE_CASE_ : List[str]=False ) -> Dict: lowercase_ = jnp.bfloataa if fpaa else jnp.floataa lowercase_ = jnp.array(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , dtype=SCREAMING_SNAKE_CASE_ ) return hidden_states @parameterized.expand( [ # fmt: off [8_3, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]], [1_7, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]], [8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]], [3, 1_0_0_0, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]], # fmt: on ] ) def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ) -> int: lowercase_ , lowercase_ = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=SCREAMING_SNAKE_CASE_ ) lowercase_ = self.get_latents(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ ) lowercase_ = self.get_encoder_hidden_states(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ ) lowercase_ = model.apply( {'''params''': params} , SCREAMING_SNAKE_CASE_ , jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ) , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , ).sample assert sample.shape == latents.shape lowercase_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) lowercase_ = jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 ) @parameterized.expand( [ # fmt: off [8_3, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]], [1_7, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, 
-0.14_58]], [8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]], [3, 1_0_0_0, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]], # fmt: on ] ) def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> Dict: lowercase_ , lowercase_ = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=SCREAMING_SNAKE_CASE_ ) lowercase_ = self.get_latents(SCREAMING_SNAKE_CASE_ , shape=(4, 4, 9_6, 9_6) , fpaa=SCREAMING_SNAKE_CASE_ ) lowercase_ = self.get_encoder_hidden_states(SCREAMING_SNAKE_CASE_ , shape=(4, 7_7, 1_0_2_4) , fpaa=SCREAMING_SNAKE_CASE_ ) lowercase_ = model.apply( {'''params''': params} , SCREAMING_SNAKE_CASE_ , jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ) , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , ).sample assert sample.shape == latents.shape lowercase_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) lowercase_ = jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
30
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = LayoutLMTokenizer UpperCAmelCase : int = LayoutLMTokenizerFast UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : Optional[Any] = True def __snake_case ( self : Optional[int]): super().setUp() a : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str): a : Tuple = "UNwant\u00E9d,running" a : Dict = "unwanted, running" return input_text, output_text def __snake_case ( self : Any): a : List[Any] = self.tokenizer_class(self.vocab_file) a : str = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9]) def __snake_case ( self : Dict): pass
40
0
'''simple docstring''' import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : List[str] = logging.get_logger() @dataclass class lowerCamelCase_ : '''simple docstring''' __UpperCamelCase: nn.Module __UpperCamelCase: List[nn.Module] = field(default_factory=snake_case__ ) __UpperCamelCase: list = field(default_factory=snake_case__ ) def _A ( self : List[str] , A : Optional[int] , A : Tensor , A : Tensor ): _UpperCAmelCase : Union[str, Any] = len(list(m.modules() ) ) == 1 or isinstance(A , nn.Convad ) or isinstance(A , nn.BatchNormad ) if has_not_submodules: self.traced.append(A ) def __call__( self : Any , A : Tensor ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(A ) [x.remove() for x in self.handles] return self @property def _A ( self : List[Any] ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda A : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowerCamelCase_ : '''simple docstring''' __UpperCamelCase: nn.Module __UpperCamelCase: nn.Module __UpperCamelCase: int = 0 __UpperCamelCase: List = field(default_factory=snake_case__ ) __UpperCamelCase: List = field(default_factory=snake_case__ ) def __call__( self : Optional[Any] , A : Tensor ): _UpperCAmelCase : Optional[Any] = Tracker(self.dest )(A ).parametrized _UpperCAmelCase : List[Any] = Tracker(self.src )(A ).parametrized _UpperCAmelCase : str = list(filter(lambda A : type(A ) not in self.src_skip , A ) ) _UpperCAmelCase : int = list(filter(lambda A : type(A ) not in self.dest_skip , A ) ) if len(A ) != len(A ): raise Exception( F"""Numbers of operations are different. Source module has {len(A )} operations while""" F""" destination module has {len(A )}.""" ) for dest_m, src_m in zip(A , A ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F"""Transfered from={src_m} to={dest_m}""" ) def UpperCamelCase_ ( _UpperCAmelCase : str , _UpperCAmelCase : ResNetConfig , _UpperCAmelCase : Path , _UpperCAmelCase : bool = True ) -> str: """simple docstring""" print(F"""Converting {name}...""" ) with torch.no_grad(): _UpperCAmelCase : Any = timm.create_model(_UpperCAmelCase , pretrained=_UpperCAmelCase ).eval() _UpperCAmelCase : Union[str, Any] = ResNetForImageClassification(_UpperCAmelCase ).eval() _UpperCAmelCase : Optional[int] = ModuleTransfer(src=_UpperCAmelCase , dest=_UpperCAmelCase ) _UpperCAmelCase : List[Any] = torch.randn((1, 3, 224, 224) ) module_transfer(_UpperCAmelCase ) assert torch.allclose(from_model(_UpperCAmelCase ) , our_model(_UpperCAmelCase ).logits ), "The model logits don't match the original one." 
_UpperCAmelCase : Tuple = F"""resnet{'-'.join(name.split('resnet' ) )}""" print(_UpperCAmelCase ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=_UpperCAmelCase , ) # we can use the convnext one _UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=_UpperCAmelCase , ) print(F"""Pushed {checkpoint_name}""" ) def UpperCamelCase_ ( _UpperCAmelCase : Path , _UpperCAmelCase : str = None , _UpperCAmelCase : bool = True ) -> List[str]: """simple docstring""" _UpperCAmelCase : Dict = "imagenet-1k-id2label.json" _UpperCAmelCase : Optional[int] = 1_000 _UpperCAmelCase : Optional[int] = (1, num_labels) _UpperCAmelCase : Union[str, Any] = "huggingface/label-files" _UpperCAmelCase : int = num_labels _UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) ) _UpperCAmelCase : Optional[int] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} _UpperCAmelCase : str = idalabel _UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} _UpperCAmelCase : Union[str, Any] = partial(_UpperCAmelCase , num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase ) _UpperCAmelCase : Optional[int] = { "resnet18": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ), "resnet26": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), "resnet34": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ), "resnet50": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), "resnet101": ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), "resnet152": ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), } if model_name: convert_weight_and_push(_UpperCAmelCase , names_to_config[model_name] , _UpperCAmelCase , _UpperCAmelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, expected_shape if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) __SCREAMING_SNAKE_CASE : Tuple = parser.parse_args() __SCREAMING_SNAKE_CASE : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
31
"""simple docstring""" def lowercase ( A_ )-> str: '''simple docstring''' if isinstance(A_ , A_ ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(A_ , A_ ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" a : Optional[Any] = False if num < 0: a : Tuple = True a : str = -num a : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(A_ ) for e in binary ) return "0b" + "".join(str(A_ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
40
0
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy UpperCAmelCase_ : Any = logging.getLogger(__name__) def SCREAMING_SNAKE_CASE_ ( __A : torch.nn.Module , __A : BnbQuantizationConfig , __A : Union[str, os.PathLike] = None , __A : Optional[Dict[str, Union[int, str, torch.device]]] = None , __A : Optional[List[str]] = None , __A : Optional[Dict[Union[int, str], Union[int, str]]] = None , __A : Optional[Union[str, os.PathLike]] = None , __A : bool = False , ) -> Dict: """simple docstring""" a_ : Optional[int] = bnb_quantization_config.load_in_abit a_ : Any = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( 'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,' ' make sure you have the latest version of `bitsandbytes` installed.' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( 'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,' 'make sure you have the latest version of `bitsandbytes` installed.' ) a_ : List[Any] = [] # custom device map if isinstance(__A , __A ) and len(device_map.keys() ) > 1: a_ : Optional[int] = [key for key, value in device_map.items() if value in ['disk', 'cpu']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: a_ : List[str] = get_keys_to_not_convert(__A ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(__A ) a_ : Union[str, Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: a_ : List[Any] = [] a_ : str = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(__A ) # compatibility with peft a_ : Any = load_in_abit a_ : List[Any] = load_in_abit a_ : List[Any] = get_parameter_device(__A ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( 'It is not recommended to quantize a loaded model. ' 'The model should be instantiated under the `init_empty_weights` context manager.' 
) a_ : Any = replace_with_bnb_layers(__A , __A , modules_to_not_convert=__A ) # convert param to the right dtype a_ : Union[str, Any] = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: a_ : int = name.replace('.weight' , '' ).replace('.bias' , '' ) a_ : int = getattr(__A , __A , __A ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(__A ): param.to(__A ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info( F"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" 'We move the model to cuda.' ) return model elif weights_location is None: raise RuntimeError( F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): a_ : List[Any] = replace_with_bnb_layers( __A , __A , modules_to_not_convert=__A ) a_ : Any = get_quantized_model_device_map( __A , __A , __A , max_memory=__A , no_split_module_classes=__A , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): a_ : Dict = True a_ : Any = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] ) load_checkpoint_in_model( __A , __A , __A , dtype=bnb_quantization_config.torch_dtype , offload_folder=__A , offload_state_dict=__A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(__A , device_map=__A , offload_dir=__A ) def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : Dict , __A : List[Any]=None , __A : Union[str, Any]=None , __A : Optional[Any]=None ) -> str: """simple docstring""" if device_map is None: if torch.cuda.is_available(): a_ : Dict = {'': torch.cuda.current_device()} else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' ) if isinstance(__A , __A ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( 'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ' '\'sequential\'.' ) a_ : Any = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) a_ : Optional[Any] = {} a_ : Union[str, Any] = special_dtypes a_ : Optional[int] = no_split_module_classes a_ : str = bnb_quantization_config.target_dtype # get max_memory for each device. 
if device_map != "sequential": a_ : Optional[Any] = get_balanced_memory( __A , low_zero=(device_map == 'balanced_low_0') , max_memory=__A , **__A , ) a_ : int = max_memory a_ : Optional[int] = infer_auto_device_map(__A , **__A ) if isinstance(__A , __A ): # check if don't have any quantized module on the cpu a_ : str = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules a_ : Optional[Any] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' ) else: logger.info( 'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' ) del device_map_without_some_modules return device_map def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : List[str] , __A : List[Any]=None , __A : List[Any]=None ) -> Any: """simple docstring""" if modules_to_not_convert is None: a_ : Union[str, Any] = [] a_ , a_ : List[str] = _replace_with_bnb_layers( __A , __A , __A , __A ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def SCREAMING_SNAKE_CASE_ ( __A : int , __A : int , __A : Tuple=None , __A : Any=None , ) -> Any: """simple docstring""" a_ : int = False for name, module in model.named_children(): if current_key_name is None: a_ : List[str] = [] current_key_name.append(__A ) if isinstance(__A , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` a_ : Union[str, Any] = '.'.join(__A ) a_ : Any = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: a_ : int = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: a_ : Union[str, Any] = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__A , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: a_ : Optional[int] = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' ) a_ : Union[str, Any] = module.weight.data if module.bias is not None: a_ : str = module.bias.data bnb_module.requires_grad_(__A ) setattr(__A , __A , __A ) a_ : int = True if len(list(module.children() ) ) > 0: a_ , a_ : List[str] = _replace_with_bnb_layers( __A , __A , __A , __A ) a_ : List[str] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def SCREAMING_SNAKE_CASE_ ( __A : Tuple ) -> Optional[int]: """simple docstring""" with init_empty_weights(): a_ : List[str] = deepcopy(__A ) # this has 0 cost since it is done inside `init_empty_weights` context manager` a_ : List[str] = find_tied_parameters(__A ) # For compatibility with Accelerate < 0.18 if isinstance(__A , __A ): a_ : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: a_ : Any = sum(__A , [] ) a_ : Optional[int] = len(__A ) > 0 # Check if it is a base model a_ : Union[str, Any] = False if hasattr(__A , 'base_model_prefix' ): a_ : Union[str, Any] = not hasattr(__A , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head a_ : Tuple = list(model.named_children() ) a_ : str = [list_modules[-1][0]] # add last module together with tied weights a_ : List[str] = set(__A ) - set(__A ) a_ : Dict = list(set(__A ) ) + list(__A ) # remove ".weight" from the keys a_ : List[str] = ['.weight', '.bias'] a_ : List[Any] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: a_ : Dict = name.replace(__A , '' ) filtered_module_names.append(__A ) return filtered_module_names def SCREAMING_SNAKE_CASE_ ( __A : Any ) -> Any: """simple docstring""" for m in model.modules(): if isinstance(__A , bnb.nn.Linearabit ): return True return False def SCREAMING_SNAKE_CASE_ ( __A : nn.Module ) -> Union[str, Any]: """simple docstring""" return next(parameter.parameters() ).device def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : int , __A : Optional[Any] , __A : List[str] , __A : Dict , __A : Tuple , __A : Optional[Any] ) -> Dict: """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(__A , __A , 0 , dtype=__A , value=__A ) a_ : Optional[int] = param_name a_ : List[str] = model if "." in tensor_name: a_ : int = tensor_name.split('.' 
) for split in splits[:-1]: a_ : int = getattr(__A , __A ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) a_ : Optional[Any] = new_module a_ : List[str] = splits[-1] # offload weights a_ : Union[str, Any] = False offload_weight(module._parameters[tensor_name] , __A , __A , index=__A ) if hasattr(module._parameters[tensor_name] , 'SCB' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , __A , index=__A , ) else: offload_weight(__A , __A , __A , index=__A ) offload_weight(__A , param_name.replace('weight' , 'SCB' ) , __A , index=__A ) set_module_tensor_to_device(__A , __A , 'meta' , dtype=__A , value=torch.empty(*param.size() ) )
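# _replace_with_bnb_layers above walks model.named_children() recursively and
# swaps nn.Linear leaves for bitsandbytes variants unless their dotted name is
# excluded. A minimal self-contained sketch of that traversal (nn.Identity
# stands in for the quantized layer, so bitsandbytes is not required here):
import torch.nn as nn

def replace_linears(module: nn.Module, skip: set, prefix: str = "") -> None:
    for name, child in module.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        if isinstance(child, nn.Linear) and full_name not in skip:
            setattr(module, name, nn.Identity())  # stand-in for a bnb layer
        else:
            replace_linears(child, skip, full_name)

model = nn.Sequential(nn.Linear(4, 4), nn.Sequential(nn.Linear(4, 2)))
replace_linears(model, skip={"0"})  # keep the first Linear untouched
print(model)  # the nested Linear is replaced, the skipped one is intact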
32
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]: '''simple docstring''' a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ ) a , a : int = [i[0] for i in r], [i[1] for i in r] a : Union[str, Any] = list(accumulate(A_ ) ) a : Optional[Any] = bisect(A_ , A_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
40
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : int = { '''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''', } class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : List[str] = "open-llama" def __init__( self : Dict , A : Optional[Any]=10_00_00 , A : Optional[int]=40_96 , A : Tuple=1_10_08 , A : List[Any]=32 , A : Optional[Any]=32 , A : List[str]="silu" , A : Optional[int]=20_48 , A : Dict=0.02 , A : Tuple=1e-6 , A : Optional[int]=True , A : Dict=0 , A : Any=1 , A : Optional[Any]=2 , A : Tuple=False , A : Dict=True , A : Dict=0.1 , A : int=0.1 , A : Optional[Any]=True , A : List[str]=True , A : int=None , **A : Optional[int] , ) -> int: lowercase_ : Tuple = vocab_size lowercase_ : Tuple = max_position_embeddings lowercase_ : int = hidden_size lowercase_ : Any = intermediate_size lowercase_ : Dict = num_hidden_layers lowercase_ : str = num_attention_heads lowercase_ : Optional[Any] = hidden_act lowercase_ : List[Any] = initializer_range lowercase_ : Optional[Any] = rms_norm_eps lowercase_ : List[str] = use_cache lowercase_ : Dict = kwargs.pop( '''use_memorry_efficient_attention''' , A ) lowercase_ : List[Any] = hidden_dropout_prob lowercase_ : Tuple = attention_dropout_prob lowercase_ : Tuple = use_stable_embedding lowercase_ : Optional[int] = shared_input_output_embedding lowercase_ : Optional[int] = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , ) def A ( self : Optional[int] ) -> List[Any]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , A ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' F'''got {self.rope_scaling}''' ) lowercase_ : Any = self.rope_scaling.get('''type''' , A ) lowercase_ : int = self.rope_scaling.get('''factor''' , A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(A , A ) or rope_scaling_factor <= 1.0: raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
33
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowercase ( A_ , A_ , A_ = False )-> list[float]: '''simple docstring''' if radian_mode: return [magnitude * cos(A_ ), magnitude * sin(A_ )] return [magnitude * cos(radians(A_ ) ), magnitude * sin(radians(A_ ) )] def lowercase ( A_ , A_ , A_ = 10**-1 )-> bool: '''simple docstring''' a : NDArray[floataa] = cross(A_ , A_ ) a : float = sum(A_ ) return abs(A_ ) < eps if __name__ == "__main__": # Test to check if it works __lowercase = array( [ polar_force(7_18.4, 180 - 30), polar_force(8_79.54, 45), polar_force(100, -90), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __lowercase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
40
0
'''simple docstring''' import requests from bs4 import BeautifulSoup def world_covid19_stats (url : str = "https://www.worldometers.info/coronavirus" ): soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' ) keys = soup.findAll('''h1''' ) values = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} ) keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} ) values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} ) return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )} if __name__ == "__main__": print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n') for key, value in world_covid19_stats().items(): print(f"""{key}\n{value}\n""")
34
"""simple docstring""" def lowercase ( A_ , A_ )-> float: '''simple docstring''' if mass < 0: raise ValueError("The mass of a body cannot be negative" ) return 0.5 * mass * abs(A_ ) * abs(A_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
40
0
'''simple docstring''' import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class UpperCAmelCase_ ( _a ): """simple docstring""" def lowerCamelCase ( self : List[Any] ): snake_case__ : str = tempfile.mkdtemp() snake_case__ : Tuple = 5 # Realm tok snake_case__ : Union[str, Any] = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """test""", """question""", """this""", """is""", """the""", """first""", """second""", """third""", """fourth""", """fifth""", """record""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] snake_case__ : Optional[int] = os.path.join(self.tmpdirname , """realm_tokenizer""" ) os.makedirs(snake_case_ , exist_ok=snake_case_ ) snake_case__ : Union[str, Any] = os.path.join(snake_case_ , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) snake_case__ : List[Any] = os.path.join(self.tmpdirname , """realm_block_records""" ) os.makedirs(snake_case_ , exist_ok=snake_case_ ) def lowerCamelCase ( self : List[str] ): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) ) def lowerCamelCase ( self : str ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase ( self : int ): snake_case__ : List[Any] = RealmConfig(num_block_records=self.num_block_records ) return config def lowerCamelCase ( self : int ): snake_case__ : str = Dataset.from_dict( { """id""": ["""0""", """1"""], """question""": ["""foo""", """bar"""], """answers""": [["""Foo""", """Bar"""], ["""Bar"""]], } ) return dataset def lowerCamelCase ( self : List[str] ): snake_case__ : str = np.array( [ b"""This is the first record""", b"""This is the second record""", b"""This is the third record""", b"""This is the fourth record""", b"""This is the fifth record""", b"""This is a longer longer longer record""", ] , dtype=snake_case_ , ) return block_records def lowerCamelCase ( self : int ): snake_case__ : Dict = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def lowerCamelCase ( self : int ): snake_case__ : Tuple = self.get_config() snake_case__ : Any = self.get_dummy_retriever() snake_case__ : str = retriever.tokenizer snake_case__ : Optional[int] = np.array([0, 3] , dtype="""long""" ) snake_case__ : List[Any] = tokenizer(["""Test question"""] ).input_ids snake_case__ : List[str] = tokenizer( ["""the fourth"""] , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , ).input_ids snake_case__ : List[Any] = config.reader_seq_len snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = retriever( snake_case_ , snake_case_ , answer_ids=snake_case_ , max_length=snake_case_ , return_tensors="""np""" ) self.assertEqual(len(snake_case_ ) , 2 ) self.assertEqual(len(snake_case_ ) , 2 ) self.assertEqual(len(snake_case_ ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) 
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , ) def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : List[str] = self.get_config() snake_case__ : List[str] = self.get_dummy_retriever() snake_case__ : str = retriever.tokenizer snake_case__ : List[Any] = np.array([0, 3, 5] , dtype="""long""" ) snake_case__ : Union[str, Any] = tokenizer(["""Test question"""] ).input_ids snake_case__ : Optional[int] = tokenizer( ["""the fourth""", """longer longer"""] , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , ).input_ids snake_case__ : Any = config.reader_seq_len snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = retriever( snake_case_ , snake_case_ , answer_ids=snake_case_ , max_length=snake_case_ , return_tensors="""np""" ) self.assertEqual([False, True, True] , snake_case_ ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , snake_case_ ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , snake_case_ ) def lowerCamelCase ( self : Any ): snake_case__ : List[str] = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) ) # Test local path snake_case__ : Union[str, Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) ) self.assertEqual(retriever.block_records[0] , b"""This is the first record""" ) # Test mocked remote path with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download: snake_case__ : Tuple = os.path.join( os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME ) snake_case__ : List[str] = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" ) self.assertEqual(retriever.block_records[0] , b"""This is the first record""" )
35
"""simple docstring""" import os import sys import unittest __lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowercase = os.path.join(git_repo_path, """src""", """diffusers""") class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = find_backend(" if not is_torch_available():") self.assertEqual(__UpperCAmelCase , "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") a : int = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx") def __snake_case ( self : Union[str, Any]): a : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , __UpperCAmelCase) self.assertIn("torch_and_transformers" , __UpperCAmelCase) self.assertIn("flax_and_transformers" , __UpperCAmelCase) self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"]) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"]) self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"]) def __snake_case ( self : Tuple): a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'") self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n") a : Dict = create_dummy_object("function" , "'torch'") self.assertEqual( __UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n") a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" a : int = create_dummy_object("FakeClass" , "'torch'") self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[str]): a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n 
requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
40
0
import requests from bs4 import BeautifulSoup def stock_price ( symbol = "AAPL" ): url : str = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}" soup = BeautifulSoup(requests.get(url ).text , "html.parser" ) class_ = "My(6px) Pos(r) smartphone_Mt(6px)" return soup.find("div" , class_=class_ ).find("span" ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
36
"""simple docstring""" __lowercase = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": """pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
40
0
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
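# Hypothetical illustration, not part of the original module: a minimal concrete
# reader showing the read() contract of AbstractDatasetReader. Real readers
# delegate to a DatasetBuilder; the class name and stub body are assumptions.
class InMemoryDatasetReader(AbstractDatasetReader):
    def read(self) -> Dataset:
        # A real implementation would build a dataset from self.path_or_paths,
        # honouring self.features, self.cache_dir, self.streaming, etc.
        return Dataset.from_dict({"text": ["hello", "world"]})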
37
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, 
TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
0
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit/hex string to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as eight little-endian hex characters."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Convert the message to a padded bit string with its length appended."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Yield each 512-bit block as a list of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit value left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of `message` as a bytes object of hex characters."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
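# Sanity check (illustrative addition, assuming the md5_me function above):
# the pure-Python digest should agree with Python's built-in implementation.
import hashlib

_msg = b"The quick brown fox jumps over the lazy dog"
assert md5_me(_msg) == hashlib.md5(_msg).hexdigest().encode("utf-8")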
38
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , __UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
40
0
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class __lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=50 , UpperCAmelCase=0.02 , UpperCAmelCase=True , UpperCAmelCase=None , ): """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = initializer_range _UpperCAmelCase = use_labels _UpperCAmelCase = scope def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.get_config() return config, input_ids, input_mask, token_labels def UpperCamelCase ( self ): """simple docstring""" return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , ) def UpperCamelCase ( self ): """simple docstring""" ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = self.prepare_config_and_inputs() _UpperCAmelCase = True _UpperCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase , ): """simple docstring""" _UpperCAmelCase = BertGenerationEncoder(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _UpperCAmelCase = model(UpperCAmelCase , attention_mask=UpperCAmelCase ) _UpperCAmelCase = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , 
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase , ): """simple docstring""" _UpperCAmelCase = True _UpperCAmelCase = BertGenerationEncoder(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _UpperCAmelCase = model( UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , ) _UpperCAmelCase = model( UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase , ): """simple docstring""" _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = BertGenerationDecoder(config=UpperCAmelCase ).to(UpperCAmelCase ).eval() # first forward pass _UpperCAmelCase = model( UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , use_cache=UpperCAmelCase , ) _UpperCAmelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) _UpperCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) _UpperCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 ) _UpperCAmelCase = model( UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['hidden_states'][0] _UpperCAmelCase = model( UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['hidden_states'][0] # select random slice _UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() _UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() _UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase , ): """simple docstring""" _UpperCAmelCase = BertGenerationDecoder(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _UpperCAmelCase = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __lowerCamelCase ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase): """simple docstring""" UpperCamelCase__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () UpperCamelCase__ = (BertGenerationDecoder,) if is_torch_available() else () UpperCamelCase__ = ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() 
else {} ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = BertGenerationEncoderTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 ) def UpperCamelCase ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = 'bert' self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase ) def UpperCamelCase ( self ): """simple docstring""" ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() _UpperCAmelCase = None self.model_tester.create_and_check_model_as_decoder( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*UpperCAmelCase ) @slow def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(UpperCAmelCase ) @require_torch class __lowerCamelCase ( unittest.TestCase): """simple docstring""" @slow def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) _UpperCAmelCase = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): _UpperCAmelCase = model(UpperCAmelCase )[0] _UpperCAmelCase = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , UpperCAmelCase ) _UpperCAmelCase = torch.tensor( [[[0.17_75, 0.00_83, -0.03_21], [1.60_02, 0.12_87, 0.39_12], [2.14_73, 0.57_91, 0.60_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 ) ) @require_torch class __lowerCamelCase ( unittest.TestCase): """simple docstring""" @slow def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) _UpperCAmelCase = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): _UpperCAmelCase = model(UpperCAmelCase )[0] _UpperCAmelCase = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape , UpperCAmelCase ) _UpperCAmelCase = torch.tensor( [[[-0.57_88, -2.59_94, -3.70_54], [0.04_38, 4.79_97, 1.87_95], [1.58_62, 6.64_09, 4.46_38]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
39
"""simple docstring""" from __future__ import annotations class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int = 0): a : Tuple = key def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Optional[Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : Any = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : str = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
40
0
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
41
"""simple docstring""" import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def lowercase ( A_ )-> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def lowercase ( A_ )-> Tuple: '''simple docstring''' class _A : """simple docstring""" def __init__( self : str , __UpperCAmelCase : int): a : List[Any] = metric_id class _A : """simple docstring""" UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]] def __snake_case ( self : List[str]): return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any: '''simple docstring''' if "tmp_path" in args: a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ): func(*A_ )
40
0
'''simple docstring''' from scipy.stats import spearmanr import datasets lowercase : Optional[Any] = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n" lowercase : List[str] = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n" lowercase : Optional[int] = r"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCAmelCase ( datasets.Metric ): def lowerCamelCase ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ): """simple docstring""" _snake_case = spearmanr(lowerCAmelCase_ , lowerCAmelCase_ ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
42
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example __lowercase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowercase ( A_ )-> list[list[int]]: '''simple docstring''' a : str = [] for i in range(len(A_ ) ): a : str = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours a : Union[str, Any] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(A_ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(A_ ) - 1: neighbour_count += cells[i + 1][j] if i < len(A_ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. a : Tuple = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(A_ ) return next_generation def lowercase ( A_ , A_ )-> list[Image.Image]: '''simple docstring''' a : List[str] = [] for _ in range(A_ ): # Create output image a : str = Image.new("RGB" , (len(cells[0] ), len(A_ )) ) a : Union[str, Any] = img.load() # Save cells to image for x in range(len(A_ ) ): for y in range(len(cells[0] ) ): a : Optional[Any] = 255 - cells[y][x] * 255 a : str = (colour, colour, colour) # Save image images.append(A_ ) a : Tuple = new_generation(A_ ) return images if __name__ == "__main__": __lowercase = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
40
0
import os from collections import deque import torch from torch.utils.data import Dataset class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self , __lowercase="" , __lowercase="train") -> Dict: assert os.path.isdir(__lowercase) __UpperCamelCase :Any = [] __UpperCamelCase :Optional[Any] = os.listdir(__lowercase) for story_filename in story_filenames_list: if "summary" in story_filename: continue __UpperCamelCase :Dict = os.path.join(__lowercase , __lowercase) if not os.path.isfile(__lowercase): continue self.documents.append(__lowercase) def __len__( self) -> List[Any]: return len(self.documents) def __getitem__( self , __lowercase) -> Union[str, Any]: __UpperCamelCase :List[Any] = self.documents[idx] __UpperCamelCase :List[Any] = document_path.split('''/''')[-1] with open(__lowercase , encoding='''utf-8''') as source: __UpperCamelCase :int = source.read() __UpperCamelCase , __UpperCamelCase :int = process_story(__lowercase) return document_name, story_lines, summary_lines def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :List[Any] = list(filter(lambda SCREAMING_SNAKE_CASE : len(SCREAMING_SNAKE_CASE ) != 0 , [line.strip() for line in raw_story.split('''\n''' )] ) ) # for some unknown reason some lines miss a period, add it __UpperCamelCase :Tuple = [_add_missing_period(SCREAMING_SNAKE_CASE ) for line in nonempty_lines] # gather article lines __UpperCamelCase :Dict = [] __UpperCamelCase :Tuple = deque(SCREAMING_SNAKE_CASE ) while True: try: __UpperCamelCase :Optional[int] = lines.popleft() if element.startswith('''@highlight''' ): break story_lines.append(SCREAMING_SNAKE_CASE ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines __UpperCamelCase :Optional[int] = list(filter(lambda SCREAMING_SNAKE_CASE : not t.startswith('''@highlight''' ) , SCREAMING_SNAKE_CASE ) ) return story_lines, summary_lines def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Dict = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u2019''', ''')'''] if line.startswith('''@highlight''' ): return line if line[-1] in END_TOKENS: return line return line + "." 
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' if len(SCREAMING_SNAKE_CASE ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(SCREAMING_SNAKE_CASE )) ) return sequence def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Union[str, Any] = torch.ones_like(SCREAMING_SNAKE_CASE ) __UpperCamelCase :str = sequence == pad_token_id __UpperCamelCase :str = 0 return mask def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :List[str] = [tokenizer.encode(SCREAMING_SNAKE_CASE ) for line in story_lines] __UpperCamelCase :Tuple = [token for sentence in story_lines_token_ids for token in sentence] __UpperCamelCase :List[str] = [tokenizer.encode(SCREAMING_SNAKE_CASE ) for line in summary_lines] __UpperCamelCase :List[Any] = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Dict = [] for sequence in batch: __UpperCamelCase :Union[str, Any] = -1 __UpperCamelCase :List[Any] = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(SCREAMING_SNAKE_CASE ) return torch.tensor(SCREAMING_SNAKE_CASE )
43
"""simple docstring""" from itertools import permutations def lowercase ( A_ )-> bool: '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False a : Optional[int] = [7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase ( A_ = 10 )-> int: '''simple docstring''' return sum( int("".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
40
0
"""simple docstring""" import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class __A ( unittest.TestCase ): def __A ( self ): _lowerCAmelCase : int = 10 def __A ( self ): _lowerCAmelCase : str = [1, 2, 3, 4] _lowerCAmelCase : int = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(a__ , self.block_size , 0 ) , a__ ) def __A ( self ): _lowerCAmelCase : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] _lowerCAmelCase : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(a__ , self.block_size , 0 ) , a__ ) def __A ( self ): _lowerCAmelCase : str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] _lowerCAmelCase : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(a__ , self.block_size , 0 ) , a__ ) def __A ( self ): _lowerCAmelCase : Union[str, Any] = """It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.""" _lowerCAmelCase , _lowerCAmelCase : Optional[int] = process_story(a__ ) self.assertEqual(a__ , [] ) def __A ( self ): _lowerCAmelCase : List[Any] = """""" _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = process_story(a__ ) self.assertEqual(a__ , [] ) self.assertEqual(a__ , [] ) def __A ( self ): _lowerCAmelCase : str = ( """It was the year of Our Lord one thousand seven hundred and """ """seventy-five\n\nSpiritual revelations were conceded to England """ """at that favoured period, as at this.\n@highlight\n\nIt was the best of times""" ) _lowerCAmelCase , _lowerCAmelCase : List[str] = process_story(a__ ) _lowerCAmelCase : Union[str, Any] = [ """It was the year of Our Lord one thousand seven hundred and seventy-five.""", """Spiritual revelations were conceded to England at that favoured period, as at this.""", ] self.assertEqual(a__ , a__ ) _lowerCAmelCase : List[str] = ["""It was the best of times."""] self.assertEqual(a__ , a__ ) def __A ( self ): _lowerCAmelCase : Any = torch.tensor([1, 2, 3, 4] ) _lowerCAmelCase : Union[str, Any] = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(a__ , 0 ).numpy() , expected.numpy() ) def __A ( self ): _lowerCAmelCase : Tuple = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) _lowerCAmelCase : Any = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(a__ , 23 ).numpy() , expected.numpy() ) def __A ( self ): _lowerCAmelCase : Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) _lowerCAmelCase : Dict = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(a__ , 1 ).numpy() , expected.numpy() ) def __A ( self ): _lowerCAmelCase : int = 101 _lowerCAmelCase : Tuple = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) _lowerCAmelCase : Dict = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) _lowerCAmelCase : int = compute_token_type_ids(a__ , a__ ) np.testing.assert_array_equal(a__ , a__ )
44
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Dict = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase : Optional[int] = False @property def __snake_case ( self : Optional[Any]): return 32 @property def __snake_case ( self : Dict): return 32 @property def __snake_case ( self : Dict): return self.time_input_dim @property def __snake_case ( self : Any): return self.time_input_dim * 4 @property def __snake_case ( self : str): return 100 @property def __snake_case ( self : str): torch.manual_seed(0) a : str = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } a : Dict = UNetaDConditionModel(**__UpperCAmelCase) return model @property def __snake_case ( self : str): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __snake_case ( self : Union[str, Any]): torch.manual_seed(0) a : Dict = VQModel(**self.dummy_movq_kwargs) return model def __snake_case ( self : Optional[Any]): a : Optional[Any] = self.dummy_unet a : int = self.dummy_movq a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , ) a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0): a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : 
List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( __UpperCAmelCase) # create hint a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) if str(__UpperCAmelCase).startswith("mps"): a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase) else: a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : str = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __snake_case ( self : Dict): a : str = "cpu" a : Tuple = self.get_dummy_components() a : Dict = self.pipeline_class(**__UpperCAmelCase) a : Optional[int] = pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase)) a : Any = output.images a : Any = pipe( **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0] a : Union[str, Any] = image[0, -3:, -3:, -1] a : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : Tuple = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[str]): a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0 a : str = hint.permute(2 , 0 , 1).unsqueeze(0) a : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(__UpperCAmelCase) a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa) a : int = pipeline.to(__UpperCAmelCase) pipeline.set_progress_bar_config(disable=__UpperCAmelCase) a : Tuple = "A robot, 4k photo" a : Any = torch.Generator(device="cuda").manual_seed(0) a , a : int = pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a : str = torch.Generator(device="cuda").manual_seed(0) a : Union[str, Any] = pipeline( image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) a : str = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
40
0
"""simple docstring""" import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) lowercase_ = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } lowercase_ = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int ) -> Union[str, Any]: for attribute in key.split('''.''' ): __a = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if weight_type is not None: __a = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape else: __a = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": __a = value elif weight_type == "weight_g": __a = value elif weight_type == "weight_v": __a = value elif weight_type == "bias": __a = value else: __a = value logger.info(f'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int ) -> int: __a = [] __a = fairseq_model.state_dict() __a = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight __a = None for name, value in fairseq_dict.items(): __a = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , ) __a = True elif name.split('''.''' )[0] == "proj": __a = fairseq_model.proj __a = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __a = True if "*" in mapped_key: __a = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2] __a = mapped_key.replace('''*''' , lowerCAmelCase__ ) if "weight_g" in name: __a = '''weight_g''' elif "weight_v" in name: __a = '''weight_v''' elif "bias" in name: __a = '''bias''' elif "weight" in name: __a = '''weight''' else: __a = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) return proj_weight def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Any ) -> str: __a = full_name.split('''conv_layers.''' )[-1] __a = name.split('''.''' ) __a = int(items[0] ) __a = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __a = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __a = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) __a = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __a = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowerCAmelCase__ ) def lowercase ( lowerCAmelCase__ : int ) -> List[str]: __a , __a = emb.weight.shape __a = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ ) __a = emb.weight.data return lin_layer def lowercase ( lowerCAmelCase__ : Optional[Any] ) -> Tuple: with open(lowerCAmelCase__ , '''r''' , encoding='''utf-8''' ) as f: __a = f.readlines() __a = [line.split(''' ''' )[0] for line in lines] __a = len(lowerCAmelCase__ ) __a = { '''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3, } vocab_dict.update(dict(zip(lowerCAmelCase__ , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , ) -> Union[str, Any]: __a = WavaVecaConfig.from_pretrained(lowerCAmelCase__ ) __a = SpeechaTextaConfig.from_pretrained( lowerCAmelCase__ , vocab_size=lowerCAmelCase__ , decoder_layers=lowerCAmelCase__ , do_stable_layer_norm=lowerCAmelCase__ ) __a = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , ) __a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) __a = model[0].eval() # set weights for wav2vec2 encoder __a = WavaVecaModel(lowerCAmelCase__ ) __a = recursively_load_weights_wavaveca(model.encoder , lowerCAmelCase__ ) __a = SpeechaTextaForCausalLM(lowerCAmelCase__ ) __a , __a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowerCAmelCase__ ) # set output linear layer unexpected_keys.remove('''embed_out''' ) __a = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) __a = SpeechEncoderDecoderModel(encoder=lowerCAmelCase__ , decoder=lowerCAmelCase__ ) __a = False # add projection layer __a = nn.Parameter(projection_layer.weight ) __a = nn.Parameter(projection_layer.bias ) __a = create_vocab_dict(lowerCAmelCase__ ) with open(os.path.join(lowerCAmelCase__ , '''vocab.json''' ) , '''w''' ) as fp: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) __a = SpeechaTextaTokenizer(os.path.join(lowerCAmelCase__ , '''vocab.json''' ) ) tokenizer.save_pretrained(lowerCAmelCase__ ) __a = hf_wavavec.config.to_dict() __a = tokenizer.pad_token_id __a = tokenizer.bos_token_id __a = tokenizer.eos_token_id __a = '''speech_to_text_2''' __a = '''wav2vec2''' __a = SpeechEncoderDecoderConfig.from_dict(lowerCAmelCase__ ) hf_wavavec.save_pretrained(lowerCAmelCase__ ) feature_extractor.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": lowercase_ = 
argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-large-lv60", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/s2t-small-mustc-en-fr-st", type=str, help="Path to hf decoder s2t checkpoint config", ) parser.add_argument("--vocab_size", default=1_0_2_2_4, type=int, help="Vocab size of decoder") parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers") lowercase_ = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
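# Usage sketch (not part of the original converter): once the script above has been
# run, the exported folder loads back through the generic encoder-decoder API.
# The folder name "./wav2vec2-s2t" is a hypothetical --pytorch_dump_folder_path value.
from transformers import SpeechEncoderDecoderModel

def load_converted_checkpoint(path="./wav2vec2-s2t"):
    # Restores both the wav2vec2 encoder and the speech_to_text_2 decoder saved above.
    return SpeechEncoderDecoderModel.from_pretrained(path)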
"""simple docstring""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) def lowercase ( A_ )-> Dict: '''simple docstring''' a : str = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: a : Union[str, Any] = 128 elif "12-12" in model_name: a : List[Any] = 12 a : str = 12 elif "14-14" in model_name: a : List[Any] = 14 a : Optional[int] = 14 elif "16-16" in model_name: a : Any = 16 a : List[Any] = 16 else: raise ValueError("Model not supported" ) a : Optional[int] = "huggingface/label-files" if "speech-commands" in model_name: a : Optional[int] = 35 a : List[str] = "speech-commands-v2-id2label.json" else: a : Optional[Any] = 527 a : Tuple = "audioset-id2label.json" a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) ) a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()} a : Any = idalabel a : str = {v: k for k, v in idalabel.items()} return config def lowercase ( A_ )-> Tuple: '''simple docstring''' if "module.v" in name: a : Union[str, Any] = name.replace("module.v" , "audio_spectrogram_transformer" ) if "cls_token" in name: a : List[Any] = name.replace("cls_token" , "embeddings.cls_token" ) if "dist_token" in name: a : Union[str, Any] = name.replace("dist_token" , "embeddings.distillation_token" ) if "pos_embed" in name: a : str = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: a : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) # transformer blocks if "blocks" in name: a : Union[str, Any] = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: a : str = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: a : Tuple = name.replace("attn" , "attention.self" ) if "norm1" in name: a : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a : Union[str, Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: a : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" ) # classifier head if "module.mlp_head.0" in name: a : List[str] = name.replace("module.mlp_head.0" , "classifier.layernorm" ) if "module.mlp_head.1" in name: a : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" ) return name def lowercase ( A_ , A_ )-> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): a : str = orig_state_dict.pop(A_ ) if "qkv" in key: a : int = key.split("." 
) a : Optional[int] = int(key_split[3] ) a : int = config.hidden_size if "weight" in key: a : List[str] = val[:dim, :] a : Any = val[dim : dim * 2, :] a : int = val[-dim:, :] else: a : Optional[Any] = val[:dim] a : Union[str, Any] = val[dim : dim * 2] a : str = val[-dim:] else: a : str = val return orig_state_dict def lowercase ( A_ )-> Dict: '''simple docstring''' a : Union[str, Any] = [ "module.v.head.weight", "module.v.head.bias", "module.v.head_dist.weight", "module.v.head_dist.bias", ] for k in ignore_keys: state_dict.pop(A_ , A_ ) @torch.no_grad() def lowercase ( A_ , A_ , A_=False )-> Optional[int]: '''simple docstring''' a : Optional[int] = get_audio_spectrogram_transformer_config(A_ ) a : Dict = { "ast-finetuned-audioset-10-10-0.4593": ( "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.450": ( "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448": ( "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448-v2": ( "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" ), "ast-finetuned-audioset-12-12-0.447": ( "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" ), "ast-finetuned-audioset-14-14-0.443": ( "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" ), "ast-finetuned-audioset-16-16-0.442": ( "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" ), "ast-finetuned-speech-commands-v2": ( "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" ), } # load original state_dict a : Any = model_name_to_url[model_name] a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" ) # remove some keys remove_keys(A_ ) # rename some keys a : Union[str, Any] = convert_state_dict(A_ , A_ ) # load 🤗 model a : List[str] = ASTForAudioClassification(A_ ) model.eval() model.load_state_dict(A_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8 a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6 a : str = 1_024 if "speech-commands" not in model_name else 128 a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ ) if "speech-commands" in model_name: a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" ) a : int = dataset[0]["audio"]["array"] else: a : Tuple = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , ) a , a : Tuple = torchaudio.load(A_ ) a : Optional[Any] = waveform.squeeze().numpy() a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" ) # forward pass a : Optional[Any] = model(**A_ ) a : List[str] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] ) elif model_name == 
"ast-finetuned-audioset-12-12-0.447": a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] ) elif model_name == "ast-finetuned-speech-commands-v2": a : Union[str, Any] = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] ) else: raise ValueError("Unknown model name" ) if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ): raise ValueError("Logits don't match" ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(A_ ).mkdir(exist_ok=A_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(A_ ) if push_to_hub: print("Pushing model and feature extractor to the hub..." ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __lowercase = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "IBertForMaskedLM", "IBertForMultipleChoice", "IBertForQuestionAnswering", "IBertForSequenceClassification", "IBertForTokenClassification", "IBertModel", "IBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase = { """configuration_rag""": ["""RagConfig"""], """retrieval_rag""": ["""RagRetriever"""], """tokenization_rag""": ["""RagTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """RagModel""", """RagPreTrainedModel""", """RagSequenceForGeneration""", """RagTokenForGeneration""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TFRagModel""", """TFRagPreTrainedModel""", """TFRagSequenceForGeneration""", """TFRagTokenForGeneration""", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging lowerCamelCase : Tuple = logging.get_logger(__name__) lowerCamelCase : List[str] = "▁" lowerCamelCase : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model"} lowerCamelCase : Tuple = { "vocab_file": { "facebook/mbart-large-50-one-to-many-mmt": ( "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model" ), } } lowerCamelCase : str = { "facebook/mbart-large-50-one-to-many-mmt": 1_0_2_4, } # fmt: off lowerCamelCase : Dict = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] class A__ ( A__ ): A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = ['input_ids', 'attention_mask'] A__ = [] A__ = [] def __init__( self : int , _a : Tuple , _a : Optional[int]=None , _a : str=None , _a : Tuple="</s>" , _a : List[str]="</s>" , _a : Any="<s>" , _a : Dict="<unk>" , _a : Optional[Any]="<pad>" , _a : Optional[int]="<mask>" , _a : Optional[Dict[str, Any]] = None , **_a : List[Any] , ) -> None: '''simple docstring''' _SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token _SCREAMING_SNAKE_CASE ={} if sp_model_kwargs is None else sp_model_kwargs _SCREAMING_SNAKE_CASE =kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=_a , tgt_lang=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , ) _SCREAMING_SNAKE_CASE =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_a ) ) _SCREAMING_SNAKE_CASE =vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _SCREAMING_SNAKE_CASE ={'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _SCREAMING_SNAKE_CASE =1 _SCREAMING_SNAKE_CASE =len(self.sp_model ) _SCREAMING_SNAKE_CASE ={ code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_a ) } _SCREAMING_SNAKE_CASE ={v: k for k, v in self.lang_code_to_id.items()} _SCREAMING_SNAKE_CASE =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) _SCREAMING_SNAKE_CASE ={v: k for k, v in self.fairseq_tokens_to_ids.items()} _SCREAMING_SNAKE_CASE =src_lang if src_lang is not None else 'en_XX' _SCREAMING_SNAKE_CASE =self.lang_code_to_id[self._src_lang] _SCREAMING_SNAKE_CASE =tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def A ( self : Dict ) -> int: '''simple docstring''' return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def A ( self : List[Any] ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def A ( self : Optional[Any] , _a : str ) -> None: '''simple docstring''' _SCREAMING_SNAKE_CASE =new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : List[Any] ) -> Dict: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.__dict__.copy() _SCREAMING_SNAKE_CASE =None return state def __setstate__( self : Dict , _a : Dict ) -> None: '''simple docstring''' _SCREAMING_SNAKE_CASE =d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _SCREAMING_SNAKE_CASE ={} _SCREAMING_SNAKE_CASE =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A ( self : List[Any] ) -> Dict: '''simple docstring''' _SCREAMING_SNAKE_CASE ={self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def A ( self : Dict , _a : str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(_a , out_type=_a ) def A ( self : str , _a : str ) -> int: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _SCREAMING_SNAKE_CASE =self.sp_model.PieceToId(_a ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def A ( self : Optional[int] , _a : int ) -> str: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def A ( self : List[Any] , _a : str ) -> int: '''simple docstring''' _SCREAMING_SNAKE_CASE =[] _SCREAMING_SNAKE_CASE ='' _SCREAMING_SNAKE_CASE =False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_a ) + token _SCREAMING_SNAKE_CASE =True _SCREAMING_SNAKE_CASE =[] else: current_sub_tokens.append(_a ) _SCREAMING_SNAKE_CASE =False out_string += self.sp_model.decode(_a ) return out_string.strip() def A ( self : Optional[int] , _a : str , _a : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(_a ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _SCREAMING_SNAKE_CASE =os.path.join( _a , (filename_prefix 
+ '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _a ) elif not os.path.isfile(self.vocab_file ): with open(_a , 'wb' ) as fi: _SCREAMING_SNAKE_CASE =self.sp_model.serialized_model_proto() fi.write(_a ) return (out_vocab_file,) def A ( self : str , _a : List[int] , _a : Optional[List[int]] = None , _a : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a ) _SCREAMING_SNAKE_CASE =[1] * len(self.prefix_tokens ) _SCREAMING_SNAKE_CASE =[1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_a )) + suffix_ones return prefix_ones + ([0] * len(_a )) + ([0] * len(_a )) + suffix_ones def A ( self : List[str] , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def A ( self : Union[str, Any] , _a : Union[str, Any] , _a : str , _a : Optional[str] , _a : Optional[str] , **_a : Optional[Any] ) -> int: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _SCREAMING_SNAKE_CASE =src_lang _SCREAMING_SNAKE_CASE =self(_a , add_special_tokens=_a , return_tensors=_a , **_a ) _SCREAMING_SNAKE_CASE =self.convert_tokens_to_ids(_a ) _SCREAMING_SNAKE_CASE =tgt_lang_id return inputs def A ( self : Optional[Any] , _a : List[str] , _a : str = "en_XX" , _a : Optional[List[str]] = None , _a : str = "ro_RO" , **_a : Optional[Any] , ) -> BatchEncoding: '''simple docstring''' _SCREAMING_SNAKE_CASE =src_lang _SCREAMING_SNAKE_CASE =tgt_lang return super().prepare_seqaseq_batch(_a , _a , **_a ) def A ( self : int ) -> List[Any]: '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def A ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def A ( self : Tuple , _a : str ) -> None: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.lang_code_to_id[src_lang] _SCREAMING_SNAKE_CASE =[self.cur_lang_code_id] _SCREAMING_SNAKE_CASE =[self.eos_token_id] def A ( self : Optional[int] , _a : str ) -> None: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.lang_code_to_id[tgt_lang] _SCREAMING_SNAKE_CASE =[self.cur_lang_code_id] _SCREAMING_SNAKE_CASE =[self.eos_token_id]
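# Usage sketch (upstream equivalent of the tokenizer class above; assumes sentencepiece
# and network access). "facebook/mbart-large-50-one-to-many-mmt" is the checkpoint the
# vocab map above points at.
from transformers import MBart50Tokenizer

tok = MBart50Tokenizer.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# src_lang is encoded as a prefix token and eos as suffix, per set_src_lang_special_tokens above
print(tok.convert_ids_to_tokens(batch["input_ids"][0].tolist())[:3])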
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _a ,_a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = StableDiffusionInpaintPipeline UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase : Union[str, Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase : int = frozenset([] ) def __snake_case ( self : Dict): torch.manual_seed(0) a : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , ) a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase) torch.manual_seed(0) a : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) a : Any = CLIPTextModel(__UpperCAmelCase) a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0] a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64)) a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64)) if str(__UpperCAmelCase).startswith("mps"): a : Tuple = torch.manual_seed(__UpperCAmelCase) else: a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __snake_case ( self : List[str]): a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent 
torch.Generator a : Tuple = self.get_dummy_components() a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase) a : int = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Any = self.get_dummy_inputs(__UpperCAmelCase) a : Optional[int] = sd_pipe(**__UpperCAmelCase).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : int = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __snake_case ( self : str): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Dict): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy") a : Tuple = "stabilityai/stable-diffusion-2-inpainting" a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Any = "Face of a yellow cat, high resolution, sitting on a park bench" a : str = torch.manual_seed(0) a : Union[str, Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def __snake_case ( self : Any): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Any = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Dict = torch.manual_seed(0) a : List[Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : Optional[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def __snake_case ( self : int): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[int] = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler") a : int = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Optional[int] = torch.manual_seed(0) a : str = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="np" , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with `prec` random witnesses."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d usable as a modular exponent
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
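# Sanity checks for the probabilistic test above (a sketch; with the default 1000
# witness rounds a wrong answer on numbers this small is astronomically unlikely).
def _demo_is_prime_big():
    assert is_prime_big(2) and is_prime_big(97)
    assert not is_prime_big(1) and not is_prime_big(561)  # 561 = 3 * 11 * 17, a Carmichael number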
"""simple docstring""" def lowercase ( A_ )-> bool: '''simple docstring''' if not all(x.isalpha() for x in string ): raise ValueError("String must only contain alphabetic characters." ) a : Tuple = sorted(string.lower() ) return len(A_ ) == len(set(A_ ) ) if __name__ == "__main__": __lowercase = input("""Enter a string """).strip() __lowercase = is_isogram(input_str) print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
__snake_case :Tuple = [ 999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0, ] __snake_case :str = [ 999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0, ] __snake_case :Any = [ 999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] __snake_case :List[str] = [ 999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0, ] __snake_case :str = [ 999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131, 88, 44, 0, ] __snake_case :List[str] = [ 999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0, ] __snake_case :int = [ 999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0, ] __snake_case :str = [ 999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0, ]
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __lowercase = datasets.utils.logging.get_logger(__name__) @dataclass class _A ( datasets.BuilderConfig ): """simple docstring""" UpperCAmelCase : int = 1_0_0_0_0 UpperCAmelCase : Optional[List[str]] = None UpperCAmelCase : Optional[datasets.Features] = None class _A ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCAmelCase : str = ParquetConfig def __snake_case ( self : Tuple): return datasets.DatasetInfo(features=self.config.features) def __snake_case ( self : List[Any] , __UpperCAmelCase : str): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''') a : str = dl_manager.download_and_extract(self.config.data_files) if isinstance(__UpperCAmelCase , (str, list, tuple)): a : Dict = data_files if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : str = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})] a : Dict = [] for split_name, files in data_files.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__UpperCAmelCase): with open(__UpperCAmelCase , "rb") as f: a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase)) break splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files})) return splits def __snake_case ( self : List[str] , __UpperCAmelCase : pa.Table): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema) return pa_table def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int): a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema) != sorted(self.config.columns): raise ValueError( f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''') for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)): with open(__UpperCAmelCase , "rb") as f: a : Tuple = pq.ParquetFile(__UpperCAmelCase) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)): a : Optional[Any] = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase) except ValueError as e: logger.error(f'''Failed to read file \'{file}\' with error 
{type(__UpperCAmelCase)}: {e}''') raise
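# Usage sketch: end users normally reach this builder through load_dataset; the
# parquet file names below are hypothetical local files.
from datasets import load_dataset

ds = load_dataset(
    "parquet",
    data_files={"train": "train.parquet", "test": "test.parquet"},
)
print(ds["train"].features)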
import argparse

import numpy as np
import torch

from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
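# Usage sketch for a converted vocoder (assumptions: torch is installed, and
# "microsoft/speecht5_hifigan" is the published checkpoint this script family
# produces; the spectrogram is a random placeholder with the expected 80 mel bins).
import torch
from transformers import SpeechTaHifiGan  # upstream spelling: SpeechT5HifiGan

vocoder = SpeechTaHifiGan.from_pretrained("microsoft/speecht5_hifigan")
spectrogram = torch.randn(100, 80)  # (frames, mel bins)
with torch.no_grad():
    waveform = vocoder(spectrogram)
print(waveform.shape)  # 1-D audio at the config's sampling rate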
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json""" ), } class _A ( _a ): """simple docstring""" UpperCAmelCase : int = """dpr""" def __init__( self : List[Any] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : int = 0 , **__UpperCAmelCase : Tuple , ): super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase) a : List[Any] = vocab_size a : Optional[Any] = hidden_size a : Union[str, Any] = num_hidden_layers a : Dict = num_attention_heads a : int = hidden_act a : Any = intermediate_size a : Any = hidden_dropout_prob a : Dict = attention_probs_dropout_prob a : Any = max_position_embeddings a : Union[str, Any] = type_vocab_size a : Optional[Any] = initializer_range a : Dict = layer_norm_eps a : int = projection_dim a : str = position_embedding_type
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case_ : Dict = logging.get_logger(__name__) snake_case_ : int = { "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json", # See all ViT models at https://huggingface.co/models?filter=vit } class __snake_case ( a ): UpperCAmelCase__ : Dict = '''vit''' def __init__( self : Any , _snake_case : List[Any]=768 , _snake_case : Any=12 , _snake_case : List[Any]=12 , _snake_case : Union[str, Any]=3072 , _snake_case : Dict="gelu" , _snake_case : List[str]=0.0 , _snake_case : List[Any]=0.0 , _snake_case : Optional[Any]=0.0_2 , _snake_case : Tuple=1e-12 , _snake_case : Dict=224 , _snake_case : Tuple=16 , _snake_case : int=3 , _snake_case : List[str]=True , _snake_case : Optional[int]=16 , **_snake_case : Tuple , ): """simple docstring""" super().__init__(**_snake_case) UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = initializer_range UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = image_size UpperCAmelCase_ = patch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = qkv_bias UpperCAmelCase_ = encoder_stride class __snake_case ( a ): UpperCAmelCase__ : Optional[Any] = version.parse('''1.11''' ) @property def lowerCamelCase ( self : Optional[int]): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ]) @property def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" return 1e-4
"""simple docstring""" class _A : """simple docstring""" def __init__( self : int , __UpperCAmelCase : int): a : Tuple = size a : Dict = [0] * size a : Optional[int] = [0] * size @staticmethod def __snake_case ( __UpperCAmelCase : int): return index | (index + 1) @staticmethod def __snake_case ( __UpperCAmelCase : int): return (index & (index + 1)) - 1 def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int): a : Union[str, Any] = value while index < self.size: a : Dict = self.get_prev(__UpperCAmelCase) + 1 if current_left_border == index: a : Optional[int] = value else: a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = self.get_next(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): right -= 1 # Because of right is exclusive a : List[str] = 0 while left <= right: a : Dict = self.get_prev(__UpperCAmelCase) if left <= current_left: a : Optional[int] = max(__UpperCAmelCase , self.tree[right]) a : Optional[Any] = current_left else: a : List[str] = max(__UpperCAmelCase , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. __lowerCamelCase : Any = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class A__ ( unittest.TestCase ): _UpperCAmelCase :int = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _UpperCAmelCase :Any = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: _UpperCAmelCase :Tuple = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: _UpperCAmelCase :Union[str, Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" ) UpperCamelCase : str = text_classifier("This is great !" ) self.assertEqual(nested_simplify(A_ ) , [{"label": "LABEL_0", "score": 0.5_04}] ) UpperCamelCase : Optional[Any] = text_classifier("This is great !" , top_k=2 ) self.assertEqual( nested_simplify(A_ ) , [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}] ) UpperCamelCase : str = text_classifier(["This is great !", "This is bad"] , top_k=2 ) self.assertEqual( nested_simplify(A_ ) , [ [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}], [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}], ] , ) UpperCamelCase : str = text_classifier("This is great !" , top_k=1 ) self.assertEqual(nested_simplify(A_ ) , [{"label": "LABEL_0", "score": 0.5_04}] ) # Legacy behavior UpperCamelCase : str = text_classifier("This is great !" , return_all_scores=A_ ) self.assertEqual(nested_simplify(A_ ) , [{"label": "LABEL_0", "score": 0.5_04}] ) UpperCamelCase : Any = text_classifier("This is great !" , return_all_scores=A_ ) self.assertEqual( nested_simplify(A_ ) , [[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}]] ) UpperCamelCase : Optional[Any] = text_classifier(["This is great !", "Something else"] , return_all_scores=A_ ) self.assertEqual( nested_simplify(A_ ) , [ [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}], [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}], ] , ) UpperCamelCase : Union[str, Any] = text_classifier(["This is great !", "Something else"] , return_all_scores=A_ ) self.assertEqual( nested_simplify(A_ ) , [ {"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_0", "score": 0.5_04}, ] , ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' import torch UpperCamelCase : Tuple = pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , ) UpperCamelCase : str = text_classifier("This is great !" 
) self.assertEqual(nested_simplify(A_ ) , [{"label": "LABEL_0", "score": 0.5_04}] ) @require_tf def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" ) UpperCamelCase : Tuple = text_classifier("This is great !" ) self.assertEqual(nested_simplify(A_ ) , [{"label": "LABEL_0", "score": 0.5_04}] ) @slow @require_torch def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = pipeline("text-classification" ) UpperCamelCase : Any = text_classifier("This is great !" ) self.assertEqual(nested_simplify(A_ ) , [{"label": "POSITIVE", "score": 1.0}] ) UpperCamelCase : Dict = text_classifier("This is bad !" ) self.assertEqual(nested_simplify(A_ ) , [{"label": "NEGATIVE", "score": 1.0}] ) UpperCamelCase : List[Any] = text_classifier("Birds are a type of animal" ) self.assertEqual(nested_simplify(A_ ) , [{"label": "POSITIVE", "score": 0.9_88}] ) @slow @require_tf def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = pipeline("text-classification" , framework="tf" ) UpperCamelCase : Dict = text_classifier("This is great !" ) self.assertEqual(nested_simplify(A_ ) , [{"label": "POSITIVE", "score": 1.0}] ) UpperCamelCase : Dict = text_classifier("This is bad !" ) self.assertEqual(nested_simplify(A_ ) , [{"label": "NEGATIVE", "score": 1.0}] ) UpperCamelCase : Optional[int] = text_classifier("Birds are a type of animal" ) self.assertEqual(nested_simplify(A_ ) , [{"label": "POSITIVE", "score": 0.9_88}] ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = TextClassificationPipeline(model=A_ , tokenizer=A_ ) return text_classifier, ["HuggingFace is in", "This is another test"] def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 UpperCamelCase : Optional[int] = "HuggingFace is in" UpperCamelCase : List[Any] = text_classifier(A_ ) self.assertEqual(nested_simplify(A_ ) , [{"label": ANY(A_ ), "score": ANY(A_ )}] ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() ) UpperCamelCase : Union[str, Any] = ["HuggingFace is in ", "Paris is in France"] UpperCamelCase : Optional[int] = text_classifier(A_ ) self.assertEqual( nested_simplify(A_ ) , [{"label": ANY(A_ ), "score": ANY(A_ )}, {"label": ANY(A_ ), "score": ANY(A_ )}] , ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format UpperCamelCase : List[str] = text_classifier(A_ , top_k=A_ ) UpperCamelCase : int = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(A_ ) , [[{"label": ANY(A_ ), "score": ANY(A_ )}] * N, [{"label": ANY(A_ ), "score": ANY(A_ )}] * N] , ) UpperCamelCase : Optional[int] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"} UpperCamelCase : List[Any] = text_classifier(A_ ) self.assertEqual( nested_simplify(A_ ) , {"label": ANY(A_ ), "score": ANY(A_ )} , ) self.assertTrue(outputs["label"] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was 
outputting wrong outputs. UpperCamelCase : int = [["HuggingFace is in ", "Paris is in France"]] with self.assertRaises(A_ ): text_classifier(A_ ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility UpperCamelCase : Any = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] ) self.assertEqual( nested_simplify(A_ ) , [{"label": ANY(A_ ), "score": ANY(A_ )}] , ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
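# Usage sketch distilled from the tests above: the same tiny checkpoint the suite
# exercises can be driven directly through the pipeline API.
from transformers import pipeline

classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
print(classifier("This is great !", top_k=2))  # two {label, score} dicts, as asserted above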
"""simple docstring""" import unittest from knapsack import knapsack as k class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[Any]): a : str = 0 a : Optional[int] = [0] a : Union[str, Any] = [0] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) a : List[str] = [60] a : str = [10] a : Optional[int] = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) def __snake_case ( self : Optional[int]): a : Any = 3 a : str = [1, 2, 3] a : Tuple = [3, 2, 1] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5) def __snake_case ( self : Tuple): a : int = 50 a : List[Any] = [60, 100, 120] a : Optional[int] = [10, 20, 30] a : str = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220) if __name__ == "__main__": unittest.main()
'''simple docstring''' from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class snake_case : """simple docstring""" SCREAMING_SNAKE_CASE_ : str =field( metadata={"help": "The output directory where the model will be written."} , ) SCREAMING_SNAKE_CASE_ : str =field( metadata={ "help": ( "The encoder model checkpoint for weights initialization." "Don't set if you want to train an encoder model from scratch." ) } , ) SCREAMING_SNAKE_CASE_ : str =field( metadata={ "help": ( "The decoder model checkpoint for weights initialization." "Don't set if you want to train a decoder model from scratch." ) } , ) SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} ) SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} ) def lowercase__ ( ) -> Any: """simple docstring""" __UpperCamelCase = HfArgumentParser((ModelArguments,) ) ((__UpperCamelCase) , ) = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: __UpperCamelCase = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: __UpperCamelCase = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: __UpperCamelCase = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: __UpperCamelCase = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=__lowercase , decoder_config=__lowercase , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens __UpperCamelCase = decoder_config.decoder_start_token_id __UpperCamelCase = decoder_config.pad_token_id if decoder_start_token_id is None: __UpperCamelCase = decoder_config.bos_token_id if pad_token_id is None: __UpperCamelCase = decoder_config.eos_token_id # This is necessary to make Flax's generate() work __UpperCamelCase = decoder_config.eos_token_id __UpperCamelCase = decoder_start_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) __UpperCamelCase = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) __UpperCamelCase = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
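# Usage sketch (assumptions: a hypothetical file name, and typical encoder/decoder ids;
# the CLI flags correspond to the field names main() reads from the dataclass above):
#
#   python create_flax_vision_encoder_decoder.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2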
53
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = LayoutLMTokenizer UpperCAmelCase : int = LayoutLMTokenizerFast UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : Optional[Any] = True def __snake_case ( self : Optional[int]): super().setUp() a : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str): a : Tuple = "UNwant\u00E9d,running" a : Dict = "unwanted, running" return input_text, output_text def __snake_case ( self : Any): a : List[Any] = self.tokenizer_class(self.vocab_file) a : str = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9]) def __snake_case ( self : Dict): pass
40
0
"""simple docstring""" import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' with open(lowerCAmelCase_ ) as metadata_file: __SCREAMING_SNAKE_CASE = json.load(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = LukeConfig(use_entity_aware_attention=lowerCAmelCase_ , **metadata["model_config"] ) # Load in the weights from the checkpoint_path __SCREAMING_SNAKE_CASE = torch.load(lowerCAmelCase_ , map_location="cpu" ) # Load the entity vocab file __SCREAMING_SNAKE_CASE = load_entity_vocab(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks __SCREAMING_SNAKE_CASE = AddedToken("<ent>" , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = AddedToken("<ent2>" , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" ) tokenizer.save_pretrained(lowerCAmelCase_ ) with open(os.path.join(lowerCAmelCase_ , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = LukeTokenizer.from_pretrained(lowerCAmelCase_ ) # Initialize the embeddings of the special tokens __SCREAMING_SNAKE_CASE = state_dict["embeddings.word_embeddings.weight"] __SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 ) __SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 ) __SCREAMING_SNAKE_CASE = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: __SCREAMING_SNAKE_CASE = f"""encoder.layer.{layer_index}.attention.self.""" __SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name] __SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name] __SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks __SCREAMING_SNAKE_CASE = state_dict["entity_embeddings.entity_embeddings.weight"] __SCREAMING_SNAKE_CASE = entity_emb[entity_vocab["[MASK]"]] __SCREAMING_SNAKE_CASE = LukeModel(config=lowerCAmelCase_ ).eval() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ ) if not (len(lowerCAmelCase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(f"""Missing keys {', '.join(lowerCAmelCase_ )}. 
Expected only missing embeddings.position_ids""" ) if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )): raise ValueError( "Unexpected keys" f""" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}""" ) # Check outputs __SCREAMING_SNAKE_CASE = LukeTokenizer.from_pretrained(lowerCAmelCase_ , task="entity_classification" ) __SCREAMING_SNAKE_CASE = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the" " new world number one avoid a humiliating second- round exit at Wimbledon ." ) __SCREAMING_SNAKE_CASE = (39, 42) __SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase_ , entity_spans=[span] , add_prefix_space=lowerCAmelCase_ , return_tensors="pt" ) __SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ ) # Verify word hidden states if model_size == "large": __SCREAMING_SNAKE_CASE = torch.Size((1, 42, 1024) ) __SCREAMING_SNAKE_CASE = torch.tensor( [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] ) else: # base __SCREAMING_SNAKE_CASE = torch.Size((1, 42, 768) ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": __SCREAMING_SNAKE_CASE = torch.Size((1, 1, 1024) ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.0466, -0.0106, -0.0179]] ) else: # base __SCREAMING_SNAKE_CASE = torch.Size((1, 1, 768) ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.1457, 0.1044, 0.0174]] ) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is""" f""" {expected_shape}""" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=1E-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(lowerCAmelCase_ ) ) model.save_pretrained(lowerCAmelCase_ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {} with open(lowerCAmelCase_ , "r" , encoding="utf-8" ) as f: for index, line in enumerate(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = line.rstrip().split("\t" ) __SCREAMING_SNAKE_CASE = index return entity_vocab if __name__ == "__main__": a__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) a__ : int = parser.parse_args() 
convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
54
"""simple docstring""" def lowercase ( A_ )-> str: '''simple docstring''' if isinstance(A_ , A_ ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(A_ , A_ ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" a : Optional[Any] = False if num < 0: a : Tuple = True a : str = -num a : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(A_ ) for e in binary ) return "0b" + "".join(str(A_ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
40
0
'''simple docstring''' import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed a_ : Dict = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''): from run_translation import main # noqa set_seed(42) a_ : int = """sshleifer/student_marian_en_ro_6_1""" a_ : str = """sshleifer/tiny-mbart""" @require_torch class snake_case ( lowercase ): """simple docstring""" def snake_case ( self , UpperCamelCase=False , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , ): """simple docstring""" lowerCamelCase_ = self.run_trainer( eval_steps=1 , max_len=12 , model_name=UpperCamelCase , num_train_epochs=1 , distributed=UpperCamelCase , extra_args_str=UpperCamelCase , predict_with_generate=UpperCamelCase , do_train=UpperCamelCase , do_eval=UpperCamelCase , do_predict=UpperCamelCase , ) lowerCamelCase_ = TrainerState.load_from_json(os.path.join(UpperCamelCase , "trainer_state.json" ) ).log_history if not do_eval: return lowerCamelCase_ = [log for log in logs if "eval_loss" in log.keys()] lowerCamelCase_ = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats lowerCamelCase_ = eval_metrics[-1] assert isinstance(last_step_stats["eval_bleu"] , UpperCamelCase ) assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def snake_case ( self ): """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def snake_case ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=UpperCamelCase ) @require_torch_multi_gpu def snake_case ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=UpperCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def snake_case ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str="--sharded_ddp simple" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def snake_case ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str="--sharded_ddp simple --fp16" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def snake_case ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=UpperCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def snake_case ( self ): """simple docstring""" self.run_seqaseq_quick( distributed=UpperCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=UpperCamelCase ) @require_apex @require_torch_gpu def snake_case ( self ): """simple docstring""" # XXX: apex breaks the trainer if it's run twice e.g. 
run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) @parameterized.expand(["base", "low", "high", "mixed"] ) @require_torch_multi_gpu def snake_case ( self , UpperCamelCase ): """simple docstring""" # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout lowerCamelCase_ = { # test with the default log_level - should be info and thus log info once "base": {"extra_args_str": "", "n_matches": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1}, # test with high log_level and log_level_replica - should be quiet on all processes "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0}, } lowerCamelCase_ = experiments[experiment_id] lowerCamelCase_ = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False} lowerCamelCase_ = "Running training" with CaptureStderr() as cl: self.run_seqaseq_quick(**UpperCamelCase , extra_args_str=data["extra_args_str"] ) lowerCamelCase_ = len(re.findall(UpperCamelCase , cl.err ) ) self.assertEqual(UpperCamelCase , data["n_matches"] ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.run_trainer( eval_steps=2 , max_len=128 , model_name=UpperCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=UpperCamelCase , ) # Check metrics lowerCamelCase_ = TrainerState.load_from_json(os.path.join(UpperCamelCase , "trainer_state.json" ) ).log_history lowerCamelCase_ = [log for log in logs if "eval_loss" in log.keys()] lowerCamelCase_ = eval_metrics[0] lowerCamelCase_ = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["eval_bleu"] , UpperCamelCase ) # test if do_predict saves generations and metrics lowerCamelCase_ = os.listdir(UpperCamelCase ) lowerCamelCase_ = {os.path.basename(UpperCamelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def snake_case ( self ): """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(UpperCamelCase ) -> Tuple[int, float]: lowerCamelCase_ = "--skip_memory_metrics 0" lowerCamelCase_ = self.run_trainer( max_len=128 , model_name=UpperCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=UpperCamelCase , distributed=UpperCamelCase , extra_args_str=UpperCamelCase , do_eval=UpperCamelCase , do_predict=UpperCamelCase , 
n_gpus_to_use=1 , ) # Check metrics lowerCamelCase_ = TrainerState.load_from_json(Path(UpperCamelCase , "trainer_state.json" ) ).log_history lowerCamelCase_ = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 ) lowerCamelCase_ = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 ) lowerCamelCase_ = logs[0]["train_loss"] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) lowerCamelCase_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb lowerCamelCase_ = gpu_peak_mem_orig + gpu_alloc_mem_orig lowerCamelCase_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb lowerCamelCase_ = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings lowerCamelCase_ = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( UpperCamelCase , UpperCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got" f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and''' f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , ) self.assertGreater( UpperCamelCase , UpperCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got" f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and''' f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , ) self.assertEqual( UpperCamelCase , UpperCamelCase , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = 3e-3 , UpperCamelCase = "adafactor" , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = 0 , UpperCamelCase = True , UpperCamelCase = True , UpperCamelCase = True , UpperCamelCase = True , UpperCamelCase = None , ): """simple docstring""" lowerCamelCase_ = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = f''' --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(UpperCamelCase 
)} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(UpperCamelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX '''.split() lowerCamelCase_ = f''' --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(UpperCamelCase )} '''.split() lowerCamelCase_ = "\n --do_predict\n ".split() lowerCamelCase_ = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += f'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: lowerCamelCase_ = get_gpu_count() lowerCamelCase_ = get_torch_dist_unique_port() lowerCamelCase_ = f''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() lowerCamelCase_ = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(UpperCamelCase , env=self.get_env() ) else: lowerCamelCase_ = ["run_translation.py"] + args with patch.object(UpperCamelCase , "argv" , UpperCamelCase ): main() return output_dir
55
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]: '''simple docstring''' a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ ) a , a : int = [i[0] for i in r], [i[1] for i in r] a : Union[str, Any] = list(accumulate(A_ ) ) a : Optional[Any] = bisect(A_ , A_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
40
0
'''simple docstring'''
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    '''simple docstring'''
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    '''simple docstring'''
    # The net moment about the origin must (approximately) vanish.
    moments = cross(location, forces)
    sum_moments = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
56
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowercase ( A_ , A_ , A_ = False )-> list[float]: '''simple docstring''' if radian_mode: return [magnitude * cos(A_ ), magnitude * sin(A_ )] return [magnitude * cos(radians(A_ ) ), magnitude * sin(radians(A_ ) )] def lowercase ( A_ , A_ , A_ = 10**-1 )-> bool: '''simple docstring''' a : NDArray[floataa] = cross(A_ , A_ ) a : float = sum(A_ ) return abs(A_ ) < eps if __name__ == "__main__": # Test to check if it works __lowercase = array( [ polar_force(7_18.4, 180 - 30), polar_force(8_79.54, 45), polar_force(100, -90), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __lowercase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
40
0
"""simple docstring""" import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @require_torch def snake_case ( self ): __lowerCAmelCase = pipeline( task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" ) __lowerCAmelCase = load_dataset("ashraq/esc50" ) __lowerCAmelCase = dataset["train"]["audio"][-1]["array"] __lowerCAmelCase = audio_classifier(__a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(__a ) , [{"score": 0.5_0_1, "label": "Sound of a dog"}, {"score": 0.4_9_9, "label": "Sound of vaccum cleaner"}] , ) @unittest.skip("No models are available in TF" ) def snake_case ( self ): pass @slow @require_torch def snake_case ( self ): __lowerCAmelCase = pipeline( task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , ) # This is an audio of a dog __lowerCAmelCase = load_dataset("ashraq/esc50" ) __lowerCAmelCase = dataset["train"]["audio"][-1]["array"] __lowerCAmelCase = audio_classifier(__a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(__a ) , [ {"score": 0.9_9_9, "label": "Sound of a dog"}, {"score": 0.0_0_1, "label": "Sound of vaccum cleaner"}, ] , ) __lowerCAmelCase = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(__a ) , [ [ {"score": 0.9_9_9, "label": "Sound of a dog"}, {"score": 0.0_0_1, "label": "Sound of vaccum cleaner"}, ], ] * 5 , ) __lowerCAmelCase = audio_classifier( [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 ) self.assertEqual( nested_simplify(__a ) , [ [ {"score": 0.9_9_9, "label": "Sound of a dog"}, {"score": 0.0_0_1, "label": "Sound of vaccum cleaner"}, ], ] * 5 , ) @unittest.skip("No models are available in TF" ) def snake_case ( self ): pass
57
"""simple docstring""" def lowercase ( A_ , A_ )-> float: '''simple docstring''' if mass < 0: raise ValueError("The mass of a body cannot be negative" ) return 0.5 * mass * abs(A_ ) * abs(A_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
40
0
'''simple docstring'''


class a_:
    '''simple docstring'''

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, current_left_border, index)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
58
"""simple docstring""" import os import sys import unittest __lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowercase = os.path.join(git_repo_path, """src""", """diffusers""") class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = find_backend(" if not is_torch_available():") self.assertEqual(__UpperCAmelCase , "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") a : int = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx") def __snake_case ( self : Union[str, Any]): a : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , __UpperCAmelCase) self.assertIn("torch_and_transformers" , __UpperCAmelCase) self.assertIn("flax_and_transformers" , __UpperCAmelCase) self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"]) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"]) self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"]) def __snake_case ( self : Tuple): a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'") self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n") a : Dict = create_dummy_object("function" , "'torch'") self.assertEqual( __UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n") a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" a : int = create_dummy_object("FakeClass" , "'torch'") self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[str]): a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n 
requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
40
0
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCAmelCase : def __init__(self : Tuple , snake_case__ : int , snake_case__ : Optional[int]=13 , snake_case__ : List[Any]=7 , snake_case__ : Tuple=True , snake_case__ : List[Any]=True , snake_case__ : Union[str, Any]=99 , snake_case__ : Any=32 , snake_case__ : Optional[Any]=5 , snake_case__ : Any=4 , snake_case__ : str=37 , snake_case__ : Optional[int]="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=0.1 , snake_case__ : int=50 , snake_case__ : List[Any]=0.02 , snake_case__ : Any=True , snake_case__ : List[Any]=None , ) -> Tuple: '''simple docstring''' snake_case : Optional[Any] = parent snake_case : Optional[int] = batch_size snake_case : Optional[int] = seq_length snake_case : List[Any] = is_training snake_case : List[Any] = use_input_mask snake_case : Optional[int] = vocab_size snake_case : Optional[int] = hidden_size snake_case : List[Any] = num_hidden_layers snake_case : List[str] = num_attention_heads snake_case : Optional[Any] = intermediate_size snake_case : Optional[Any] = hidden_act snake_case : str = hidden_dropout_prob snake_case : Optional[Any] = attention_probs_dropout_prob snake_case : Union[str, Any] = max_position_embeddings snake_case : List[str] = initializer_range snake_case : Optional[Any] = use_labels snake_case : Dict = scope def _SCREAMING_SNAKE_CASE (self : int ) -> List[Any]: '''simple docstring''' snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case : List[Any] = None if self.use_input_mask: snake_case : int = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case : Optional[Any] = self.get_config() return config, input_ids, input_mask, token_labels def _SCREAMING_SNAKE_CASE (self : int ) -> int: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=snake_case__ , initializer_range=self.initializer_range , ) def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Dict: '''simple docstring''' ( ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ) : Tuple = self.prepare_config_and_inputs() snake_case : Any = True snake_case : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : Tuple , snake_case__ : int , **snake_case__ : int , ) -> 
str: '''simple docstring''' snake_case : int = BertGenerationEncoder(config=snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : str = model(snake_case__ , attention_mask=snake_case__ ) snake_case : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : int , **snake_case__ : Optional[Any] , ) -> Optional[Any]: '''simple docstring''' snake_case : str = True snake_case : int = BertGenerationEncoder(config=snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : Dict = model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , ) snake_case : Optional[Any] = model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : str , **snake_case__ : str , ) -> Dict: '''simple docstring''' snake_case : Optional[int] = True snake_case : Union[str, Any] = True snake_case : Optional[int] = BertGenerationDecoder(config=snake_case__ ).to(snake_case__ ).eval() # first forward pass snake_case : Optional[Any] = model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , ) snake_case : Any = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and snake_case : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case : Optional[Any] = torch.cat([input_mask, next_mask] , dim=-1 ) snake_case : Tuple = model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0] snake_case : Dict = model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0] # select random slice snake_case : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) ) def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Optional[Any] , *snake_case__ : List[Any] , ) -> str: '''simple docstring''' snake_case : str = BertGenerationDecoder(snake_case__ ) model.to(snake_case__ ) model.eval() snake_case : Optional[Any] = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[int]: '''simple docstring''' snake_case , snake_case , snake_case , snake_case : List[Any] = self.prepare_config_and_inputs() snake_case : Dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( A_ ,A_ ,A_ ,unittest.TestCase ): A__ : str = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () A__ : Any = (BertGenerationDecoder,) if is_torch_available() else () A__ : str = ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() else {} ) def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Any: '''simple docstring''' snake_case : Dict = BertGenerationEncoderTester(self ) snake_case : List[Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE (self : int ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE (self : Dict ) -> Any: '''simple docstring''' snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Tuple ) -> int: '''simple docstring''' snake_case , snake_case , snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() snake_case : List[Any] = "bert" self.model_tester.create_and_check_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> int: '''simple docstring''' snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Any ) -> Dict: '''simple docstring''' snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Dict ) -> Any: '''simple docstring''' ( ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() snake_case : Dict = None self.model_tester.create_and_check_model_as_decoder( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) def _SCREAMING_SNAKE_CASE (self : int ) -> int: '''simple docstring''' snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*snake_case__ ) @slow def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Tuple: '''simple docstring''' snake_case : Optional[int] = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) self.assertIsNotNone(snake_case__ ) @require_torch class UpperCAmelCase ( unittest.TestCase ): @slow def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Any: '''simple docstring''' snake_case : Tuple = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) snake_case : Dict = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] ) with torch.no_grad(): snake_case : List[str] = model(snake_case__ )[0] snake_case : List[str] = torch.Size([1, 8, 10_24] ) self.assertEqual(output.shape , snake_case__ ) snake_case : int = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] 
) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) ) @require_torch class UpperCAmelCase ( unittest.TestCase ): @slow def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Tuple: '''simple docstring''' snake_case : List[Any] = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) snake_case : List[str] = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] ) with torch.no_grad(): snake_case : List[Any] = model(snake_case__ )[0] snake_case : List[Any] = torch.Size([1, 8, 5_03_58] ) self.assertEqual(output.shape , snake_case__ ) snake_case : Union[str, Any] = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
59
"""simple docstring""" __lowercase = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": """pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
40
0
"""simple docstring""" from collections import defaultdict from math import ceil, sqrt def _snake_case ( _snake_case : int = 1000000 , _snake_case : int = 10 ): lowerCAmelCase : defaultdict = defaultdict(_snake_case ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: lowerCAmelCase : int = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: lowerCAmelCase : Any = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(_snake_case , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(f"""{solution() = }""")
60
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, 
TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
0
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _a = logging.get_logger(__name__) # pylint: disable=invalid-name _a = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n' def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=8 ): UpperCAmelCase_ : Union[str, Any] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase_ : Tuple = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" super().__init__() self.register_modules( unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , ) UpperCAmelCase_ : Union[str, Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" if latents is None: UpperCAmelCase_ : List[Any] = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) UpperCAmelCase_ : int = latents.to(lowercase_ ) UpperCAmelCase_ : List[Any] = latents * scheduler.init_noise_sigma return latents def UpperCamelCase__ ( self , lowercase_=0 ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) UpperCAmelCase_ : Dict = torch.device(F"""cuda:{gpu_id}""" ) UpperCAmelCase_ : Optional[Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowercase_ , lowercase_ ) def UpperCamelCase__ ( self , lowercase_=0 ): """simple docstring""" if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) UpperCAmelCase_ : Tuple = torch.device(F"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=lowercase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase_ : Union[str, Any] = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase_ , UpperCAmelCase_ : int = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ ) # We'll offload the last model manually. UpperCAmelCase_ : Dict = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCamelCase__ ( self ): """simple docstring""" if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(lowercase_ , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowercase_ ) def __call__( self , lowercase_ , lowercase_ , lowercase_ = 512 , lowercase_ = 512 , lowercase_ = 100 , lowercase_ = 4.0 , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = self._execution_device UpperCAmelCase_ : Dict = guidance_scale > 1.0 if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : Optional[int] = torch.cat(lowercase_ , dim=0 ) UpperCAmelCase_ : List[str] = image_embeds.shape[0] * num_images_per_prompt if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : Any = torch.cat(lowercase_ , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ : Optional[Any] = image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase_ : Tuple = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase_ : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ ) self.scheduler.set_timesteps(lowercase_ , device=lowercase_ ) UpperCAmelCase_ : Any = self.scheduler.timesteps UpperCAmelCase_ : List[Any] = self.unet.config.in_channels UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor ) # create initial latent UpperCAmelCase_ : str = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(lowercase_ ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : Union[str, Any] = {"image_embeds": image_embeds} UpperCAmelCase_ : Optional[Any] = self.unet( sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0] if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = noise_pred.chunk(2 ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = variance_pred.chunk(2 ) UpperCAmelCase_ : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase_ : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and 
self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ : Dict = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0] # post-processing UpperCAmelCase_ : Optional[Any] = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: UpperCAmelCase_ : Any = image * 0.5 + 0.5 UpperCAmelCase_ : Tuple = image.clamp(0 , 1 ) UpperCAmelCase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase_ : Tuple = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase_ )
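# A minimal, self-contained restatement of the module-level downscaling helper
# above (whose parameter names were garbled in extraction), added here only to
# show its rounding behaviour. The name `_downscale` is hypothetical and the
# values below are illustrative, not from the source.
def _downscale(height: int, width: int, scale_factor: int = 8) -> tuple[int, int]:
    # floor to the latent grid, rounding up when there is a remainder
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


assert _downscale(512, 512) == (64, 64)  # exact multiple of 64: clean latent grid
assert _downscale(520, 512) == (72, 64)  # remainder rounds the height up one step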
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , __UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging _A = logging.get_logger(__name__) logging.set_verbosity_info() def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ): if "xprophetnet" in prophetnet_checkpoint_path: __UpperCamelCase =XLMProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase , __UpperCamelCase =XLMProphetNetForConditionalGeneration.from_pretrained( SCREAMING_SNAKE_CASE__ , output_loading_info=SCREAMING_SNAKE_CASE__ ) else: __UpperCamelCase =ProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase , __UpperCamelCase =ProphetNetForConditionalGeneration.from_pretrained( SCREAMING_SNAKE_CASE__ , output_loading_info=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =['key_proj', 'value_proj', 'query_proj'] __UpperCamelCase ={ 'self_attn': 'ngram_self_attn', 'cross_attn': 'encoder_attn', 'cross_attn_layer_norm': 'encoder_attn_layer_norm', 'feed_forward_layer_norm': 'final_layer_norm', 'feed_forward': '', 'intermediate': 'fc1', 'output': 'fc2', 'key_proj': 'k_proj', 'query_proj': 'q_proj', 'value_proj': 'v_proj', 'word_embeddings': 'embed_tokens', 'embeddings_layer_norm': 'emb_layer_norm', 'relative_pos_embeddings': 'relative_linear', 'ngram_embeddings': 'ngram_input_embed', 'position_embeddings': 'embed_positions', } for key in loading_info["missing_keys"]: __UpperCamelCase =key.split('.' ) if attributes[0] == "lm_head": __UpperCamelCase =prophet __UpperCamelCase =prophet_old else: __UpperCamelCase =prophet.prophetnet __UpperCamelCase =prophet_old.model __UpperCamelCase =False for attribute in attributes: if attribute in mapping: __UpperCamelCase =mapping[attribute] if not hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(SCREAMING_SNAKE_CASE__ ) > 0: __UpperCamelCase =attribute elif hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __UpperCamelCase =attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" __UpperCamelCase =old_model.weight logger.info(F'{attribute} is initialized.' ) __UpperCamelCase =True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
__UpperCamelCase =old_model.bias logger.info(F'{attribute} is initialized' ) __UpperCamelCase =True break elif attribute in special_keys and hasattr(SCREAMING_SNAKE_CASE__ , 'in_proj_weight' ): __UpperCamelCase =old_model.in_proj_weight.shape[0] // 3 __UpperCamelCase =getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": __UpperCamelCase =nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) __UpperCamelCase =nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": __UpperCamelCase =nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) __UpperCamelCase =nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": __UpperCamelCase =nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) __UpperCamelCase =nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) __UpperCamelCase =True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings." __UpperCamelCase =nn.Parameter(old_model.embed_positions.weight[:5_12, :] ) __UpperCamelCase =True break if attribute.isdigit(): __UpperCamelCase =model[int(SCREAMING_SNAKE_CASE__ )] __UpperCamelCase =old_model[int(SCREAMING_SNAKE_CASE__ )] else: __UpperCamelCase =getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if old_attribute == "": __UpperCamelCase =old_model else: if not hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise ValueError(F'{old_model} does not have {old_attribute}' ) __UpperCamelCase =getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not is_key_init: raise ValueError(F'{key} was not correctly initialized!' ) print(F'Saving model to {pytorch_dump_folder_path}' ) prophet.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _A = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring""" from __future__ import annotations class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int = 0): a : Tuple = key def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Optional[Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : Any = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : str = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
'''simple docstring'''


def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
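# Illustrative calls (hypothetical data): the function probes both ends of the
# list and recurses inward, so despite the two pointers it is a linear search.
if __name__ == "__main__":
    assert search([1, 0, -1, 5, 7], key=7) == 4  # found at the right pointer
    assert search([1, 0, -1, 5, 7], key=3) == -1  # absent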
"""simple docstring""" import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def lowercase ( A_ )-> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def lowercase ( A_ )-> Tuple: '''simple docstring''' class _A : """simple docstring""" def __init__( self : str , __UpperCAmelCase : int): a : List[Any] = metric_id class _A : """simple docstring""" UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]] def __snake_case ( self : List[str]): return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any: '''simple docstring''' if "tmp_path" in args: a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ): func(*A_ )
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : list[int] ): """simple docstring""" if not len(snake_case__ ) == len(snake_case__ ) == 3: raise ValueError("""Please enter a valid equation.""" ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError("""Both a & b of two equations can't be zero.""" ) # Extract the coefficients _snake_case , _snake_case , _snake_case : Optional[Any] = equationa _snake_case , _snake_case , _snake_case : Optional[Any] = equationa # Calculate the determinants of the matrices _snake_case : Any = aa * ba - aa * ba _snake_case : Optional[int] = ca * ba - ca * ba _snake_case : Tuple = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError("""Infinite solutions. (Consistent system)""" ) else: raise ValueError("""No solution. (Inconsistent system)""" ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: _snake_case : int = determinant_x / determinant _snake_case : List[str] = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example __lowercase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowercase ( A_ )-> list[list[int]]: '''simple docstring''' a : str = [] for i in range(len(A_ ) ): a : str = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours a : Union[str, Any] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(A_ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(A_ ) - 1: neighbour_count += cells[i + 1][j] if i < len(A_ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. a : Tuple = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(A_ ) return next_generation def lowercase ( A_ , A_ )-> list[Image.Image]: '''simple docstring''' a : List[str] = [] for _ in range(A_ ): # Create output image a : str = Image.new("RGB" , (len(cells[0] ), len(A_ )) ) a : Union[str, Any] = img.load() # Save cells to image for x in range(len(A_ ) ): for y in range(len(cells[0] ) ): a : Optional[Any] = 255 - cells[y][x] * 255 a : str = (colour, colour, colour) # Save image images.append(A_ ) a : Tuple = new_generation(A_ ) return images if __name__ == "__main__": __lowercase = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowerCAmelCase_ ( __A ) -> Optional[int]: '''simple docstring''' UpperCAmelCase__ = filter(lambda __A : p.requires_grad, model.parameters() ) UpperCAmelCase__ = sum([np.prod(p.size() ) for p in model_parameters] ) return params UpperCamelCase__ = logging.getLogger(__name__) def lowerCAmelCase_ ( __A, __A ) -> str: '''simple docstring''' if metric == "rouge2": UpperCAmelCase__ = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": UpperCAmelCase__ = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": UpperCAmelCase__ = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" " function." ) UpperCAmelCase__ = ModelCheckpoint( dirpath=__A, filename=__A, monitor=f"""val_{metric}""", mode="max", save_top_k=3, every_n_epochs=1, ) return checkpoint_callback def lowerCAmelCase_ ( __A, __A ) -> List[Any]: '''simple docstring''' return EarlyStopping( monitor=f"""val_{metric}""", mode="min" if "loss" in metric else "max", patience=__A, verbose=__A, ) class A ( pl.Callback ): def lowercase_ (self : int , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ) -> List[str]: """simple docstring""" UpperCAmelCase__ = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(__UpperCAmelCase ) @rank_zero_only def lowercase_ (self : str , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : pl.LightningModule , __UpperCAmelCase : str , __UpperCAmelCase : List[Any]=True ) -> None: """simple docstring""" logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" ) UpperCAmelCase__ = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results UpperCAmelCase__ = Path(pl_module.hparams.output_dir ) if type_path == "test": UpperCAmelCase__ = od / "test_results.txt" UpperCAmelCase__ = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
UpperCAmelCase__ = od / f"""{type_path}_results/{trainer.global_step:05d}.txt""" UpperCAmelCase__ = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=__UpperCAmelCase ) generations_file.parent.mkdir(exist_ok=__UpperCAmelCase ) with open(__UpperCAmelCase , "a+" ) as writer: for key in sorted(__UpperCAmelCase ): if key in ["log", "progress_bar", "preds"]: continue UpperCAmelCase__ = metrics[key] if isinstance(__UpperCAmelCase , torch.Tensor ): UpperCAmelCase__ = val.item() UpperCAmelCase__ = f"""{key}: {val:.6f}\n""" writer.write(__UpperCAmelCase ) if not save_generations: return if "preds" in metrics: UpperCAmelCase__ = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(__UpperCAmelCase ) @rank_zero_only def lowercase_ (self : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] ) -> Optional[int]: """simple docstring""" try: UpperCAmelCase__ = pl_module.model.model.num_parameters() except AttributeError: UpperCAmelCase__ = pl_module.model.num_parameters() UpperCAmelCase__ = count_trainable_parameters(__UpperCAmelCase ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} ) @rank_zero_only def lowercase_ (self : List[Any] , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : pl.LightningModule ) -> Union[str, Any]: """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(__UpperCAmelCase , __UpperCAmelCase , "test" ) @rank_zero_only def lowercase_ (self : Tuple , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : Union[str, Any] ) -> int: """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
"""simple docstring""" from itertools import permutations def lowercase ( A_ )-> bool: '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False a : Optional[int] = [7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase ( A_ = 10 )-> int: '''simple docstring''' return sum( int("".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
"""simple docstring""" import argparse import json import numpy import torch from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Optional[int] = torch.load(_lowercase, map_location="""cpu""" ) snake_case_ :Any = chkpt["""model"""] # We have the base model one level deeper than the original XLM repository snake_case_ :Dict = {} for k, v in state_dict.items(): if "pred_layer" in k: snake_case_ :Optional[Any] = v else: snake_case_ :List[str] = v snake_case_ :List[Any] = chkpt["""params"""] snake_case_ :str = {n: v for n, v in config.items() if not isinstance(_lowercase, (torch.FloatTensor, numpy.ndarray) )} snake_case_ :List[Any] = chkpt["""dico_word2id"""] snake_case_ :Optional[Any] = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""", """""" ): i for s, i in vocab.items()} # Save pytorch-model snake_case_ :Dict = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME snake_case_ :List[Any] = pytorch_dump_folder_path + """/""" + CONFIG_NAME snake_case_ :Optional[int] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""] print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(_lowercase, _lowercase ) print(f"""Save configuration file to {pytorch_config_dump_path}""" ) with open(_lowercase, """w""", encoding="""utf-8""" ) as f: f.write(json.dumps(_lowercase, indent=2 ) + """\n""" ) print(f"""Save vocab file to {pytorch_config_dump_path}""" ) with open(_lowercase, """w""", encoding="""utf-8""" ) as f: f.write(json.dumps(_lowercase, indent=2 ) + """\n""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __a = parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Dict = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase : Optional[int] = False @property def __snake_case ( self : Optional[Any]): return 32 @property def __snake_case ( self : Dict): return 32 @property def __snake_case ( self : Dict): return self.time_input_dim @property def __snake_case ( self : Any): return self.time_input_dim * 4 @property def __snake_case ( self : str): return 100 @property def __snake_case ( self : str): torch.manual_seed(0) a : str = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } a : Dict = UNetaDConditionModel(**__UpperCAmelCase) return model @property def __snake_case ( self : str): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __snake_case ( self : Union[str, Any]): torch.manual_seed(0) a : Dict = VQModel(**self.dummy_movq_kwargs) return model def __snake_case ( self : Optional[Any]): a : Optional[Any] = self.dummy_unet a : int = self.dummy_movq a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , ) a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0): a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : 
List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( __UpperCAmelCase) # create hint a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) if str(__UpperCAmelCase).startswith("mps"): a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase) else: a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : str = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __snake_case ( self : Dict): a : str = "cpu" a : Tuple = self.get_dummy_components() a : Dict = self.pipeline_class(**__UpperCAmelCase) a : Optional[int] = pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase)) a : Any = output.images a : Any = pipe( **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0] a : Union[str, Any] = image[0, -3:, -3:, -1] a : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : Tuple = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[str]): a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0 a : str = hint.permute(2 , 0 , 1).unsqueeze(0) a : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(__UpperCAmelCase) a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa) a : int = pipeline.to(__UpperCAmelCase) pipeline.set_progress_bar_config(disable=__UpperCAmelCase) a : Tuple = "A robot, 4k photo" a : Any = torch.Generator(device="cuda").manual_seed(0) a , a : int = pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a : str = torch.Generator(device="cuda").manual_seed(0) a : Union[str, Any] = pipeline( image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) a : str = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel __UpperCAmelCase ={ "text_branch": "text_model", "audio_branch": "audio_model.audio_encoder", "attn": "attention.self", "self.proj": "output.dense", "attention.self_mask": "attn_mask", "mlp.fc1": "intermediate.dense", "mlp.fc2": "output.dense", "norm1": "layernorm_before", "norm2": "layernorm_after", "bn0": "batch_norm", } __UpperCAmelCase =AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc") def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__=False ) -> List[str]: __lowerCamelCase , __lowerCamelCase = create_model( '''HTSAT-tiny''' , '''roberta''' , UpperCamelCase__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=UpperCamelCase__ , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def __lowerCAmelCase ( UpperCamelCase__ ) -> Optional[Any]: __lowerCamelCase = {} __lowerCamelCase = r'''.*sequential.(\d+).*''' __lowerCamelCase = r'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: __lowerCamelCase = key.replace(UpperCamelCase__ , UpperCamelCase__ ) if re.match(UpperCamelCase__ , UpperCamelCase__ ): # replace sequential layers with list __lowerCamelCase = re.match(UpperCamelCase__ , UpperCamelCase__ ).group(1 ) __lowerCamelCase = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(UpperCamelCase__ )//3}.linear.""" ) elif re.match(UpperCamelCase__ , UpperCamelCase__ ): __lowerCamelCase = int(re.match(UpperCamelCase__ , UpperCamelCase__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
__lowerCamelCase = 1 if projecton_layer == 0 else 2 __lowerCamelCase = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value __lowerCamelCase = value __lowerCamelCase = mixed_qkv.size(0 ) // 3 __lowerCamelCase = mixed_qkv[:qkv_dim] __lowerCamelCase = mixed_qkv[qkv_dim : qkv_dim * 2] __lowerCamelCase = mixed_qkv[qkv_dim * 2 :] __lowerCamelCase = query_layer __lowerCamelCase = key_layer __lowerCamelCase = value_layer else: __lowerCamelCase = value return model_state_dict def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase = init_clap(UpperCamelCase__ , enable_fusion=UpperCamelCase__ ) clap_model.eval() __lowerCamelCase = clap_model.state_dict() __lowerCamelCase = rename_state_dict(UpperCamelCase__ ) __lowerCamelCase = ClapConfig() __lowerCamelCase = enable_fusion __lowerCamelCase = ClapModel(UpperCamelCase__ ) # ignore the spectrogram embedding layer model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) transformers_config.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": __UpperCAmelCase =argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not") __UpperCAmelCase =parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
"""simple docstring""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) def lowercase ( A_ )-> Dict: '''simple docstring''' a : str = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: a : Union[str, Any] = 128 elif "12-12" in model_name: a : List[Any] = 12 a : str = 12 elif "14-14" in model_name: a : List[Any] = 14 a : Optional[int] = 14 elif "16-16" in model_name: a : Any = 16 a : List[Any] = 16 else: raise ValueError("Model not supported" ) a : Optional[int] = "huggingface/label-files" if "speech-commands" in model_name: a : Optional[int] = 35 a : List[str] = "speech-commands-v2-id2label.json" else: a : Optional[Any] = 527 a : Tuple = "audioset-id2label.json" a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) ) a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()} a : Any = idalabel a : str = {v: k for k, v in idalabel.items()} return config def lowercase ( A_ )-> Tuple: '''simple docstring''' if "module.v" in name: a : Union[str, Any] = name.replace("module.v" , "audio_spectrogram_transformer" ) if "cls_token" in name: a : List[Any] = name.replace("cls_token" , "embeddings.cls_token" ) if "dist_token" in name: a : Union[str, Any] = name.replace("dist_token" , "embeddings.distillation_token" ) if "pos_embed" in name: a : str = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: a : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) # transformer blocks if "blocks" in name: a : Union[str, Any] = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: a : str = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: a : Tuple = name.replace("attn" , "attention.self" ) if "norm1" in name: a : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a : Union[str, Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: a : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" ) # classifier head if "module.mlp_head.0" in name: a : List[str] = name.replace("module.mlp_head.0" , "classifier.layernorm" ) if "module.mlp_head.1" in name: a : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" ) return name def lowercase ( A_ , A_ )-> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): a : str = orig_state_dict.pop(A_ ) if "qkv" in key: a : int = key.split("." 
) a : Optional[int] = int(key_split[3] ) a : int = config.hidden_size if "weight" in key: a : List[str] = val[:dim, :] a : Any = val[dim : dim * 2, :] a : int = val[-dim:, :] else: a : Optional[Any] = val[:dim] a : Union[str, Any] = val[dim : dim * 2] a : str = val[-dim:] else: a : str = val return orig_state_dict def lowercase ( A_ )-> Dict: '''simple docstring''' a : Union[str, Any] = [ "module.v.head.weight", "module.v.head.bias", "module.v.head_dist.weight", "module.v.head_dist.bias", ] for k in ignore_keys: state_dict.pop(A_ , A_ ) @torch.no_grad() def lowercase ( A_ , A_ , A_=False )-> Optional[int]: '''simple docstring''' a : Optional[int] = get_audio_spectrogram_transformer_config(A_ ) a : Dict = { "ast-finetuned-audioset-10-10-0.4593": ( "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.450": ( "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448": ( "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448-v2": ( "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" ), "ast-finetuned-audioset-12-12-0.447": ( "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" ), "ast-finetuned-audioset-14-14-0.443": ( "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" ), "ast-finetuned-audioset-16-16-0.442": ( "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" ), "ast-finetuned-speech-commands-v2": ( "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" ), } # load original state_dict a : Any = model_name_to_url[model_name] a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" ) # remove some keys remove_keys(A_ ) # rename some keys a : Union[str, Any] = convert_state_dict(A_ , A_ ) # load 🤗 model a : List[str] = ASTForAudioClassification(A_ ) model.eval() model.load_state_dict(A_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8 a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6 a : str = 1_024 if "speech-commands" not in model_name else 128 a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ ) if "speech-commands" in model_name: a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" ) a : int = dataset[0]["audio"]["array"] else: a : Tuple = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , ) a , a : Tuple = torchaudio.load(A_ ) a : Optional[Any] = waveform.squeeze().numpy() a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" ) # forward pass a : Optional[Any] = model(**A_ ) a : List[str] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] ) elif model_name == 
"ast-finetuned-audioset-12-12-0.447": a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] ) elif model_name == "ast-finetuned-speech-commands-v2": a : Union[str, Any] = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] ) else: raise ValueError("Unknown model name" ) if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ): raise ValueError("Logits don't match" ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(A_ ).mkdir(exist_ok=A_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(A_ ) if push_to_hub: print("Pushing model and feature extractor to the hub..." ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __lowercase = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ = { """configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""], """tokenization_roc_bert""": ["""RoCBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """RoCBertForCausalLM""", """RoCBertForMaskedLM""", """RoCBertForMultipleChoice""", """RoCBertForPreTraining""", """RoCBertForQuestionAnswering""", """RoCBertForSequenceClassification""", """RoCBertForTokenClassification""", """RoCBertLayer""", """RoCBertModel""", """RoCBertPreTrainedModel""", """load_tf_weights_in_roc_bert""", ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase = { """configuration_rag""": ["""RagConfig"""], """retrieval_rag""": ["""RagRetriever"""], """tokenization_rag""": ["""RagTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """RagModel""", """RagPreTrainedModel""", """RagSequenceForGeneration""", """RagTokenForGeneration""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TFRagModel""", """TFRagPreTrainedModel""", """TFRagSequenceForGeneration""", """TFRagTokenForGeneration""", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class UpperCamelCase : def __init__( self, lowerCAmelCase__, ) -> List[str]: snake_case_ = parent snake_case_ = 13 snake_case_ = 7 snake_case_ = 30 snake_case_ = self.seq_length + self.mem_len snake_case_ = 15 snake_case_ = True snake_case_ = True snake_case_ = 99 snake_case_ = [10, 50, 80] snake_case_ = 32 snake_case_ = 32 snake_case_ = 4 snake_case_ = 8 snake_case_ = 128 snake_case_ = 2 snake_case_ = 2 snake_case_ = None snake_case_ = 1 snake_case_ = 0 snake_case_ = 3 snake_case_ = self.vocab_size - 1 snake_case_ = 0.01 def a_ ( self) -> int: snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) snake_case_ = TransfoXLConfig( vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, ) return (config, input_ids_a, input_ids_a, lm_labels) def a_ ( self) -> int: random.seed(self.seed) tf.random.set_seed(self.seed) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> List[Any]: snake_case_ = TFTransfoXLModel(lowerCAmelCase__) snake_case_ , snake_case_ = model(lowerCAmelCase__).to_tuple() snake_case_ = {'input_ids': input_ids_a, 'mems': mems_a} snake_case_ , snake_case_ = model(lowerCAmelCase__).to_tuple() self.parent.assertEqual(hidden_states_a.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(hidden_states_a.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) self.parent.assertListEqual( [mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Tuple: snake_case_ = TFTransfoXLLMHeadModel(lowerCAmelCase__) snake_case_ , snake_case_ = model(lowerCAmelCase__).to_tuple() snake_case_ = {'input_ids': input_ids_a, 'labels': lm_labels} snake_case_ , snake_case_ = model(lowerCAmelCase__).to_tuple() snake_case_ , snake_case_ = model([input_ids_a, mems_a]).to_tuple() snake_case_ = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels} snake_case_ , snake_case_ = model(lowerCAmelCase__).to_tuple() self.parent.assertEqual(lm_logits_a.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * 
self.num_hidden_layers, ) self.parent.assertEqual(lm_logits_a.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> str: snake_case_ = TFTransfoXLForSequenceClassification(lowerCAmelCase__) snake_case_ = model(lowerCAmelCase__) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def a_ ( self) -> Optional[int]: snake_case_ = self.prepare_config_and_inputs() ((snake_case_) , (snake_case_) , (snake_case_) , (snake_case_)) = config_and_inputs snake_case_ = {'input_ids': input_ids_a} return config, inputs_dict @require_tf class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) SCREAMING_SNAKE_CASE_ = () if is_tf_available() else () SCREAMING_SNAKE_CASE_ = ( { "feature-extraction": TFTransfoXLModel, "text-classification": TFTransfoXLForSequenceClassification, "text-generation": TFTransfoXLLMHeadModel, "zero-shot": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Dict: if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def a_ ( self) -> List[Any]: snake_case_ = TFTransfoXLModelTester(self) snake_case_ = ConfigTester(self, config_class=lowerCAmelCase__, d_embed=37) def a_ ( self) -> Dict: self.config_tester.run_common_tests() def a_ ( self) -> Dict: self.model_tester.set_seed() snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*lowerCAmelCase__) def a_ ( self) -> Tuple: self.model_tester.set_seed() snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*lowerCAmelCase__) def a_ ( self) -> Union[str, Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowerCAmelCase__) def a_ ( self) -> Any: snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: snake_case_ = model_class(lowerCAmelCase__) assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer) if model_class in list_other_models_with_output_ebd: snake_case_ = model.get_output_embeddings() assert isinstance(lowerCAmelCase__, tf.keras.layers.Layer) snake_case_ = model.get_bias() assert name is None else: snake_case_ = model.get_output_embeddings() assert x is None snake_case_ = model.get_bias() assert name is None def a_ ( self) -> Optional[int]: # TODO JP: Make TransfoXL XLA compliant pass @slow def a_ ( self) -> str: for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = TFTransfoXLModel.from_pretrained(lowerCAmelCase__) self.assertIsNotNone(lowerCAmelCase__) @unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.') def a_ ( self) -> Union[str, Any]: pass @require_tf class UpperCamelCase ( unittest.TestCase ): @unittest.skip('Skip test until #12651 is resolved.') @slow def a_ ( self) -> List[Any]: snake_case_ = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103') # fmt: off snake_case_ = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]], dtype=tf.intaa) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . 
<eod> </s> <eos> # fmt: off snake_case_ = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> snake_case_ = model.generate(lowerCAmelCase__, max_length=200, do_sample=lowerCAmelCase__) self.assertListEqual(output_ids[0].numpy().tolist(), lowerCAmelCase__)
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _a ,_a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = StableDiffusionInpaintPipeline UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase : Union[str, Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase : int = frozenset([] ) def __snake_case ( self : Dict): torch.manual_seed(0) a : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , ) a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase) torch.manual_seed(0) a : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) a : Any = CLIPTextModel(__UpperCAmelCase) a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0] a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64)) a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64)) if str(__UpperCAmelCase).startswith("mps"): a : Tuple = torch.manual_seed(__UpperCAmelCase) else: a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __snake_case ( self : List[str]): a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent 
torch.Generator a : Tuple = self.get_dummy_components() a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase) a : int = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Any = self.get_dummy_inputs(__UpperCAmelCase) a : Optional[int] = sd_pipe(**__UpperCAmelCase).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : int = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __snake_case ( self : str): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Dict): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy") a : Tuple = "stabilityai/stable-diffusion-2-inpainting" a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Any = "Face of a yellow cat, high resolution, sitting on a park bench" a : str = torch.manual_seed(0) a : Union[str, Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def __snake_case ( self : Any): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Any = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Dict = torch.manual_seed(0) a : List[Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : Optional[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def __snake_case ( self : int): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[int] = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler") a : int = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Optional[int] = torch.manual_seed(0) a : str = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="np" , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A__ : List[Any] =logging.get_logger(__name__) A__ : Any =torch.device('''cpu''') def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _lowerCAmelCase = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw ) return im def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = dct.pop(lowerCAmelCase ) _lowerCAmelCase = val def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [] for k in state_dict.keys(): _lowerCAmelCase = k if ".pwconv" in k: _lowerCAmelCase = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: _lowerCAmelCase = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: _lowerCAmelCase = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: _lowerCAmelCase = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: _lowerCAmelCase = k_new.split(""".""" ) if ls[2].isdigit(): _lowerCAmelCase = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: _lowerCAmelCase = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size _lowerCAmelCase = 10_00 _lowerCAmelCase = """huggingface/label-files""" _lowerCAmelCase = """imagenet-1k-id2label.json""" _lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) _lowerCAmelCase = {int(lowerCAmelCase ): v for k, v in idalabel.items()} _lowerCAmelCase = idalabel _lowerCAmelCase = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": _lowerCAmelCase = [3, 3, 6, 4] _lowerCAmelCase = [48, 56, 1_12, 2_20] elif swiftformer_name == "swiftformer_s": _lowerCAmelCase = [3, 3, 9, 6] _lowerCAmelCase = [48, 64, 1_68, 2_24] elif swiftformer_name == "swiftformer_l1": _lowerCAmelCase = [4, 3, 10, 5] _lowerCAmelCase = [48, 96, 1_92, 3_84] elif swiftformer_name == "swiftformer_l3": _lowerCAmelCase = [4, 4, 12, 6] _lowerCAmelCase = [64, 1_28, 3_20, 5_12] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): _lowerCAmelCase = 
torch.hub.load_state_dict_from_url(lowerCAmelCase , map_location="""cpu""" , check_hash=lowerCAmelCase ) else: _lowerCAmelCase = torch.load(lowerCAmelCase , map_location="""cpu""" ) _lowerCAmelCase = checkpoint _lowerCAmelCase = create_rename_keys(lowerCAmelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # load HuggingFace model _lowerCAmelCase = SwiftFormerForImageClassification(lowerCAmelCase ).eval() hf_model.load_state_dict(lowerCAmelCase ) # prepare test inputs _lowerCAmelCase = prepare_img() _lowerCAmelCase = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) _lowerCAmelCase = processor(images=lowerCAmelCase , return_tensors="""pt""" ) # compare outputs from both models _lowerCAmelCase = get_expected_output(lowerCAmelCase ) _lowerCAmelCase = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 10_00] ) assert torch.allclose(hf_logits[0, 0:5] , lowerCAmelCase , atol=1e-3 ) Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" ) hf_model.save_pretrained(lowerCAmelCase ) if __name__ == "__main__": A__ : str =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') A__ : Tuple =parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
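# Hypothetical invocation of the conversion script above (the script file name
# and checkpoint path are assumptions; the flags match the argparse definitions
# in the source):
#   python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt <path-or-url>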
"""simple docstring""" def lowercase ( A_ )-> bool: '''simple docstring''' if not all(x.isalpha() for x in string ): raise ValueError("String must only contain alphabetic characters." ) a : Tuple = sorted(string.lower() ) return len(A_ ) == len(set(A_ ) ) if __name__ == "__main__": __lowercase = input("""Enter a string """).strip() __lowercase = is_isogram(input_str) print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
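# Hypothetical command-line invocation via python-fire (the file names are
# assumptions, not from the source; fire maps the function arguments to flags):
#   python rouge_cli.py predictions.txt references.txt --save_path=rouge.json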
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __lowercase = datasets.utils.logging.get_logger(__name__) @dataclass class _A ( datasets.BuilderConfig ): """simple docstring""" UpperCAmelCase : int = 1_0_0_0_0 UpperCAmelCase : Optional[List[str]] = None UpperCAmelCase : Optional[datasets.Features] = None class _A ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCAmelCase : str = ParquetConfig def __snake_case ( self : Tuple): return datasets.DatasetInfo(features=self.config.features) def __snake_case ( self : List[Any] , __UpperCAmelCase : str): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''') a : str = dl_manager.download_and_extract(self.config.data_files) if isinstance(__UpperCAmelCase , (str, list, tuple)): a : Dict = data_files if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : str = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})] a : Dict = [] for split_name, files in data_files.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__UpperCAmelCase): with open(__UpperCAmelCase , "rb") as f: a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase)) break splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files})) return splits def __snake_case ( self : List[str] , __UpperCAmelCase : pa.Table): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema) return pa_table def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int): a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema) != sorted(self.config.columns): raise ValueError( f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''') for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)): with open(__UpperCAmelCase , "rb") as f: a : Tuple = pq.ParquetFile(__UpperCAmelCase) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)): a : Optional[Any] = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase) except ValueError as e: logger.error(f'''Failed to read file \'{file}\' with error 
{type(__UpperCAmelCase)}: {e}''') raise
"""simple docstring""" lowerCAmelCase__ = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def snake_case_ ( A_ : dict, A_ : int, A_ : int ): '''simple docstring''' _lowerCamelCase : List[str] = set() # keep track of all the paths to be checked _lowerCamelCase : str = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _lowerCamelCase : str = queue.pop(0 ) # get the last node from the path _lowerCamelCase : List[Any] = path[-1] if node not in explored: _lowerCamelCase : Union[str, Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _lowerCamelCase : Union[str, Any] = list(A_ ) new_path.append(A_ ) queue.append(A_ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(A_ ) # in case there's no path between the 2 nodes return [] def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ): '''simple docstring''' if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _lowerCamelCase : Optional[int] = [start] _lowerCamelCase : int = set(A_ ) # Keep tab on distances from `start` node. _lowerCamelCase : int = {start: 0, target: -1} while queue: _lowerCamelCase : Optional[Any] = queue.pop(0 ) if node == target: _lowerCamelCase : Any = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(A_ ) queue.append(A_ ) _lowerCamelCase : Any = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json""" ), } class _A ( _a ): """simple docstring""" UpperCAmelCase : int = """dpr""" def __init__( self : List[Any] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : int = 0 , **__UpperCAmelCase : Tuple , ): super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase) a : List[Any] = vocab_size a : Optional[Any] = hidden_size a : Union[str, Any] = num_hidden_layers a : Dict = num_attention_heads a : int = hidden_act a : Any = intermediate_size a : Any = hidden_dropout_prob a : Dict = attention_probs_dropout_prob a : Any = max_position_embeddings a : Union[str, Any] = type_vocab_size a : Optional[Any] = initializer_range a : Dict = layer_norm_eps a : int = projection_dim a : str = position_embedding_type
from __future__ import annotations from random import random class A_ : def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : int | None = None): __lowerCamelCase : Union[str, Any] = value __lowerCamelCase : int = random() __lowerCamelCase : Node | None = None __lowerCamelCase : Node | None = None def __repr__( self : List[Any]): from pprint import pformat if self.left is None and self.right is None: return F"'{self.value}: {self.prior:.5}'" else: return pformat( {F"{self.value}: {self.prior:.5}": (self.left, self.right)} ,indent=1) def __str__( self : Union[str, Any]): __lowerCamelCase : Optional[int] = str(self.value) + ' ' __lowerCamelCase : Optional[Any] = str(self.left or '') __lowerCamelCase : List[Any] = str(self.right or '') return value + left + right def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> tuple[Node | None, Node | None]: if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: __lowerCamelCase , __lowerCamelCase : Union[str, Any] = split(root.left , lowerCamelCase__ ) return left, root else: __lowerCamelCase , __lowerCamelCase : List[str] = split(root.right , lowerCamelCase__ ) return root, right def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Node | None: if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: __lowerCamelCase : Optional[Any] = merge(left.right , lowerCamelCase__ ) return left else: __lowerCamelCase : List[str] = merge(lowerCamelCase__ , right.left ) return right def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Node | None: __lowerCamelCase : List[Any] = Node(lowerCamelCase__ ) __lowerCamelCase , __lowerCamelCase : Optional[int] = split(lowerCamelCase__ , lowerCamelCase__ ) return merge(merge(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Node | None: __lowerCamelCase , __lowerCamelCase : List[str] = split(lowerCamelCase__ , value - 1 ) __lowerCamelCase , __lowerCamelCase : Any = split(lowerCamelCase__ , lowerCamelCase__ ) return merge(lowerCamelCase__ , lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None: if not root: # None return else: inorder(root.left ) print(root.value , end=',' ) inorder(root.right ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Node | None: for arg in args.split(): if arg[0] == "+": __lowerCamelCase : str = insert(lowerCamelCase__ , int(arg[1:] ) ) elif arg[0] == "-": __lowerCamelCase : List[Any] = erase(lowerCamelCase__ , int(arg[1:] ) ) else: print('Unknown command' ) return root def SCREAMING_SNAKE_CASE__ ( ) -> None: __lowerCamelCase : int = None print( 'enter numbers to create a tree, + value to add value into treap, ' '- value to erase all nodes with value. \'q\' to quit. ' ) __lowerCamelCase : Optional[int] = input() while args != "q": __lowerCamelCase : Optional[int] = interact_treap(lowerCamelCase__ , lowerCamelCase__ ) print(lowerCamelCase__ ) __lowerCamelCase : int = input() print('good by!' ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" class _A : """simple docstring""" def __init__( self : int , __UpperCAmelCase : int): a : Tuple = size a : Dict = [0] * size a : Optional[int] = [0] * size @staticmethod def __snake_case ( __UpperCAmelCase : int): return index | (index + 1) @staticmethod def __snake_case ( __UpperCAmelCase : int): return (index & (index + 1)) - 1 def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int): a : Union[str, Any] = value while index < self.size: a : Dict = self.get_prev(__UpperCAmelCase) + 1 if current_left_border == index: a : Optional[int] = value else: a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = self.get_next(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): right -= 1 # Because of right is exclusive a : List[str] = 0 while left <= right: a : Dict = self.get_prev(__UpperCAmelCase) if left <= current_left: a : Optional[int] = max(__UpperCAmelCase , self.tree[right]) a : Optional[Any] = current_left else: a : List[str] = max(__UpperCAmelCase , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline _lowercase = datasets.utils.logging.get_logger(__name__) @dataclass class lowerCAmelCase_ ( datasets.BuilderConfig ): '''simple docstring''' _lowerCamelCase: Optional[datasets.Features] = None _lowerCamelCase: str = "utf-8" _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True # deprecated _lowerCamelCase: Optional[int] = None # deprecated _lowerCamelCase: int = 10 << 20 # 10MB _lowerCamelCase: Optional[bool] = None class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' _lowerCamelCase: Optional[int] = JsonConfig def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' ) A = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' ) if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' ) return datasets.DatasetInfo(features=self.config.features ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str] ) -> List[Any]: if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A = dl_manager.download_and_extract(self.config.data_files ) if isinstance(A_ ,(str, list, tuple) ): A = data_files if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )] A = [] for split_name, files in data_files.items(): if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] splits.append(datasets.SplitGenerator(name=A_ ,gen_kwargs={'files': files} ) ) return splits def _SCREAMING_SNAKE_CASE ( self : str ,A_ : pa.Table ) -> pa.Table: if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): A = self.config.features.arrow_schema.field(A_ ).type A = pa_table.append_column(A_ ,pa.array([None] * len(A_ ) ,type=A_ ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A = table_cast(A_ ,self.config.features.arrow_schema ) return pa_table def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ) -> List[str]: for file_idx, file in enumerate(itertools.chain.from_iterable(A_ ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(A_ ,encoding=self.config.encoding ,errors=self.config.encoding_errors ) as f: A = json.load(A_ ) # We keep only the field we are interested in A = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(A_ ,(list, tuple) ): A = set().union(*[row.keys() for row in dataset] ) A = {col: [row.get(A_ ) for row in dataset] for col in keys} else: A = dataset A = pa.Table.from_pydict(A_ ) yield file_idx, self._cast_table(A_ ) # If the file has one json object per line else: with open(A_ ,'rb' ) as f: A = 0 
# Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A = max(self.config.chunksize // 32 ,16 << 10 ) A = ( self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: A = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(A_ ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A = batch.decode(self.config.encoding ,errors=A_ ).encode('utf-8' ) try: while True: try: A = paj.read_json( io.BytesIO(A_ ) ,read_options=paj.ReadOptions(block_size=A_ ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(A_ ,pa.ArrowInvalid ) and "straddling" not in str(A_ ) or block_size > len(A_ ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F'Batch of {len(A_ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( A_ ,encoding=self.config.encoding ,errors=self.config.encoding_errors ) as f: A = json.load(A_ ) except json.JSONDecodeError: logger.error(F'Failed to read file \'{file}\' with error {type(A_ )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(A_ ,A_ ): # list is the only sequence type supported in JSON try: A = set().union(*[row.keys() for row in dataset] ) A = {col: [row.get(A_ ) for row in dataset] for col in keys} A = pa.Table.from_pydict(A_ ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F'Failed to read file \'{file}\' with error {type(A_ )}: {e}' ) raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(A_ ) break else: logger.error(F'Failed to read file \'{file}\' with error {type(A_ )}: {e}' ) raise ValueError( F'Not able to read records in the JSON file at {file}. ' F'You should probably indicate the field of the JSON file containing your records. ' F'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(A_ ) batch_idx += 1
"""simple docstring""" import unittest from knapsack import knapsack as k class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[Any]): a : str = 0 a : Optional[int] = [0] a : Union[str, Any] = [0] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) a : List[str] = [60] a : str = [10] a : Optional[int] = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) def __snake_case ( self : Optional[int]): a : Any = 3 a : str = [1, 2, 3] a : Tuple = [3, 2, 1] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5) def __snake_case ( self : Tuple): a : int = 50 a : List[Any] = [60, 100, 120] a : Optional[int] = [10, 20, 30] a : str = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220) if __name__ == "__main__": unittest.main()
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class __UpperCamelCase ( unittest.TestCase ): @slow def lowercase__ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =TFAutoModel.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModel.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModelForPreTraining.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =TFAutoModelForCausalLM.from_pretrained( lowerCAmelCase, output_loading_info=lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModelForCausalLM.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =AutoModelForCausalLM.from_pretrained( lowerCAmelCase, 
output_loading_info=lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =TFAutoModelForMaskedLM.from_pretrained( lowerCAmelCase, output_loading_info=lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModelForMaskedLM.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =AutoModelForMaskedLM.from_pretrained( lowerCAmelCase, output_loading_info=lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =TFAutoModelForSeqaSeqLM.from_pretrained( lowerCAmelCase, output_loading_info=lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =AutoModelForSeqaSeqLM.from_pretrained( lowerCAmelCase, output_loading_info=lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) 
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) self.assertEqual(model.num_parameters(), 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase ), 14_410 ) lowerCamelCase_ =AutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) self.assertEqual(model.num_parameters(), 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase ), 14_410 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) self.assertEqual(model.num_parameters(), 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase ), 14_410 ) lowerCamelCase_ =AutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) self.assertEqual(model.num_parameters(), 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase ), 14_410 )
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = LayoutLMTokenizer UpperCAmelCase : int = LayoutLMTokenizerFast UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : Optional[Any] = True def __snake_case ( self : Optional[int]): super().setUp() a : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str): a : Tuple = "UNwant\u00E9d,running" a : Dict = "unwanted, running" return input_text, output_text def __snake_case ( self : Any): a : List[Any] = self.tokenizer_class(self.vocab_file) a : str = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9]) def __snake_case ( self : Dict): pass
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) a_ = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['PerceiverFeatureExtractor'] a_ = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" def lowercase ( A_ )-> str: '''simple docstring''' if isinstance(A_ , A_ ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(A_ , A_ ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" a : Optional[Any] = False if num < 0: a : Tuple = True a : str = -num a : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(A_ ) for e in binary ) return "0b" + "".join(str(A_ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ): '''simple docstring''' return x if y == 0 else greatest_common_divisor(_lowerCAmelCase , x % y ) def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ): '''simple docstring''' return (x * y) // greatest_common_divisor(_lowerCAmelCase , _lowerCAmelCase ) def a_ ( _lowerCAmelCase : int = 20 ): '''simple docstring''' lowercase__ : Dict = 1 for i in range(1 , n + 1 ): lowercase__ : List[Any] = lcm(_lowerCAmelCase , _lowerCAmelCase ) return g if __name__ == "__main__": print(f'''{solution() = }''')
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]: '''simple docstring''' a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ ) a , a : int = [i[0] for i in r], [i[1] for i in r] a : Union[str, Any] = list(accumulate(A_ ) ) a : Optional[Any] = bisect(A_ , A_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations import pandas as pd def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): UpperCAmelCase = [0] * no_of_processes UpperCAmelCase = [0] * no_of_processes # Copy the burst time into remaining_time[] for i in range(lowercase_ ): UpperCAmelCase = burst_time[i] UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = 999999999 UpperCAmelCase = 0 UpperCAmelCase = False # Process until all processes are completed while complete != no_of_processes: for j in range(lowercase_ ): if arrival_time[j] <= increment_time and remaining_time[j] > 0: if remaining_time[j] < minm: UpperCAmelCase = remaining_time[j] UpperCAmelCase = j UpperCAmelCase = True if not check: increment_time += 1 continue remaining_time[short] -= 1 UpperCAmelCase = remaining_time[short] if minm == 0: UpperCAmelCase = 999999999 if remaining_time[short] == 0: complete += 1 UpperCAmelCase = False # Find finish time of current process UpperCAmelCase = increment_time + 1 # Calculate waiting time UpperCAmelCase = finish_time - arrival_time[short] UpperCAmelCase = finar - burst_time[short] if waiting_time[short] < 0: UpperCAmelCase = 0 # Increment time increment_time += 1 return waiting_time def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): UpperCAmelCase = [0] * no_of_processes for i in range(lowercase_ ): UpperCAmelCase = burst_time[i] + waiting_time[i] return turn_around_time def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): UpperCAmelCase = 0 UpperCAmelCase = 0 for i in range(lowercase_ ): UpperCAmelCase = total_waiting_time + waiting_time[i] UpperCAmelCase = total_turn_around_time + turn_around_time[i] print(F"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" ) print('Average turn around time =' , total_turn_around_time / no_of_processes ) if __name__ == "__main__": print("""Enter how many process you want to analyze""") snake_case_ = int(input()) snake_case_ = [0] * no_of_processes snake_case_ = [0] * no_of_processes snake_case_ = list(range(1, no_of_processes + 1)) for i in range(no_of_processes): print("""Enter the arrival time and burst time for process:--""" + str(i + 1)) snake_case_ , snake_case_ = map(int, input().split()) snake_case_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes) snake_case_ = burst_time snake_case_ = no_of_processes snake_case_ = waiting_time snake_case_ = calculate_turnaroundtime(bt, n, wt) calculate_average_times(waiting_time, turn_around_time, no_of_processes) snake_case_ = pd.DataFrame( list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)), columns=[ """Process""", """BurstTime""", """ArrivalTime""", """WaitingTime""", """TurnAroundTime""", ], ) # Printing the dataFrame pd.set_option("""display.max_rows""", fcfs.shape[0] + 1) print(fcfs)
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowercase ( A_ , A_ , A_ = False )-> list[float]: '''simple docstring''' if radian_mode: return [magnitude * cos(A_ ), magnitude * sin(A_ )] return [magnitude * cos(radians(A_ ) ), magnitude * sin(radians(A_ ) )] def lowercase ( A_ , A_ , A_ = 10**-1 )-> bool: '''simple docstring''' a : NDArray[floataa] = cross(A_ , A_ ) a : float = sum(A_ ) return abs(A_ ) < eps if __name__ == "__main__": # Test to check if it works __lowercase = array( [ polar_force(7_18.4, 180 - 30), polar_force(8_79.54, 45), polar_force(100, -90), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __lowercase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
'''simple docstring'''

import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
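# Hypothetical invocation of the conversion script above (both paths and the
# script file name are assumptions, not from the source):
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/model.pt ./xglm-converted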
"""simple docstring""" def lowercase ( A_ , A_ )-> float: '''simple docstring''' if mass < 0: raise ValueError("The mass of a body cannot be negative" ) return 0.5 * mass * abs(A_ ) * abs(A_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
'''simple docstring''' from __future__ import annotations a__ : Dict = list[tuple[int, int]] a__ : Tuple = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] a__ : List[str] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class lowercase_ : def __init__( self , a , a , a , a , a , a , ): UpperCamelCase__ = pos_x UpperCamelCase__ = pos_y UpperCamelCase__ = (pos_y, pos_x) UpperCamelCase__ = goal_x UpperCamelCase__ = goal_y UpperCamelCase__ = g_cost UpperCamelCase__ = parent UpperCamelCase__ = self.calculate_heuristic() def __a ( self ): UpperCamelCase__ = abs(self.pos_x - self.goal_x ) UpperCamelCase__ = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self , a ): return self.f_cost < other.f_cost class lowercase_ : def __init__( self , a , a ): UpperCamelCase__ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , a ) UpperCamelCase__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , a ) UpperCamelCase__ = [self.start] UpperCamelCase__ = [] UpperCamelCase__ = False def __a ( self ): while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() UpperCamelCase__ = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: UpperCamelCase__ = True return self.retrace_path(a ) self.closed_nodes.append(a ) UpperCamelCase__ = self.get_successors(a ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(a ) else: # retrieve the best current path UpperCamelCase__ = self.open_nodes.pop(self.open_nodes.index(a ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(a ) else: self.open_nodes.append(a ) if not self.reached: return [self.start.pos] return None def __a ( self , a ): UpperCamelCase__ = [] for action in delta: UpperCamelCase__ = parent.pos_x + action[1] UpperCamelCase__ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(a ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( a , a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , a , ) ) return successors def __a ( self , a ): UpperCamelCase__ = node UpperCamelCase__ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCamelCase__ = current_node.parent path.reverse() return path if __name__ == "__main__": a__ : Union[str, Any] = (0, 0) a__ : Optional[Any] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print('------') a__ : Any = GreedyBestFirst(init, goal) a__ : List[Any] = greedy_bf.search() if path: for pos_x, pos_y in path: a__ : str = 2 for elem in grid: print(elem)
code_codestyle: 80
"""simple docstring""" import os import sys import unittest __lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowercase = os.path.join(git_repo_path, """src""", """diffusers""") class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = find_backend(" if not is_torch_available():") self.assertEqual(__UpperCAmelCase , "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") a : int = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx") def __snake_case ( self : Union[str, Any]): a : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , __UpperCAmelCase) self.assertIn("torch_and_transformers" , __UpperCAmelCase) self.assertIn("flax_and_transformers" , __UpperCAmelCase) self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"]) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"]) self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"]) def __snake_case ( self : Tuple): a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'") self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n") a : Dict = create_dummy_object("function" , "'torch'") self.assertEqual( __UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n") a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" a : int = create_dummy_object("FakeClass" , "'torch'") self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[str]): a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n 
requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
style_context_codestyle: 40
label: 0