code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
_lowercase : Union[str, Any] = StableDiffusionControlNetImgaImgPipeline
_lowercase : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_lowercase : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowercase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
_lowercase : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __magic_name__ ( self : int):
'''simple docstring'''
torch.manual_seed(0)
snake_case__ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
torch.manual_seed(0)
snake_case__ = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
torch.manual_seed(0)
snake_case__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0)
snake_case__ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0)
snake_case__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
snake_case__ = CLIPTextModel(UpperCamelCase__)
snake_case__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
snake_case__ = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __magic_name__ ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any]=0):
'''simple docstring'''
if str(UpperCamelCase__).startswith("""mps"""):
snake_case__ = torch.manual_seed(UpperCamelCase__)
else:
snake_case__ = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__)
snake_case__ = 2
snake_case__ = randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=UpperCamelCase__ , device=torch.device(UpperCamelCase__) , )
snake_case__ = floats_tensor(control_image.shape , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
snake_case__ = image.cpu().permute(0 , 2 , 3 , 1)[0]
snake_case__ = Image.fromarray(np.uinta(UpperCamelCase__)).convert("""RGB""").resize((6_4, 6_4))
snake_case__ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ ( self : int):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3)
def __magic_name__ ( self : Dict):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3)
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
_lowercase : Dict = StableDiffusionControlNetImgaImgPipeline
_lowercase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_lowercase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowercase : int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __magic_name__ ( self : Union[str, Any]):
'''simple docstring'''
torch.manual_seed(0)
snake_case__ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
torch.manual_seed(0)
def init_weights(UpperCamelCase__ : Tuple):
if isinstance(UpperCamelCase__ , torch.nn.Convad):
torch.nn.init.normal(m.weight)
m.bias.data.fill_(1.0)
snake_case__ = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(UpperCamelCase__)
torch.manual_seed(0)
snake_case__ = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(UpperCamelCase__)
torch.manual_seed(0)
snake_case__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0)
snake_case__ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0)
snake_case__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
snake_case__ = CLIPTextModel(UpperCamelCase__)
snake_case__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
snake_case__ = MultiControlNetModel([controlneta, controlneta])
snake_case__ = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __magic_name__ ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any]=0):
'''simple docstring'''
if str(UpperCamelCase__).startswith("""mps"""):
snake_case__ = torch.manual_seed(UpperCamelCase__)
else:
snake_case__ = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__)
snake_case__ = 2
snake_case__ = [
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=UpperCamelCase__ , device=torch.device(UpperCamelCase__) , ),
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=UpperCamelCase__ , device=torch.device(UpperCamelCase__) , ),
]
snake_case__ = floats_tensor(control_image[0].shape , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
snake_case__ = image.cpu().permute(0 , 2 , 3 , 1)[0]
snake_case__ = Image.fromarray(np.uinta(UpperCamelCase__)).convert("""RGB""").resize((6_4, 6_4))
snake_case__ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**UpperCamelCase__)
pipe.to(UpperCamelCase__)
snake_case__ = 10.0
snake_case__ = 4
snake_case__ = self.get_dummy_inputs(UpperCamelCase__)
snake_case__ = steps
snake_case__ = scale
snake_case__ = pipe(**UpperCamelCase__)[0]
snake_case__ = self.get_dummy_inputs(UpperCamelCase__)
snake_case__ = steps
snake_case__ = scale
snake_case__ = pipe(**UpperCamelCase__ , control_guidance_start=0.1 , control_guidance_end=0.2)[0]
snake_case__ = self.get_dummy_inputs(UpperCamelCase__)
snake_case__ = steps
snake_case__ = scale
snake_case__ = pipe(**UpperCamelCase__ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7])[0]
snake_case__ = self.get_dummy_inputs(UpperCamelCase__)
snake_case__ = steps
snake_case__ = scale
snake_case__ = pipe(**UpperCamelCase__ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8])[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a)) > 1E-3
assert np.sum(np.abs(output_a - output_a)) > 1E-3
assert np.sum(np.abs(output_a - output_a)) > 1E-3
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ ( self : Tuple):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3)
def __magic_name__ ( self : Tuple):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3)
def __magic_name__ ( self : Any):
'''simple docstring'''
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**UpperCamelCase__)
pipe.to(UpperCamelCase__)
pipe.set_progress_bar_config(disable=UpperCamelCase__)
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(UpperCamelCase__)
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self : Optional[Any]):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
snake_case__ = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""")
snake_case__ = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=UpperCamelCase__ , controlnet=UpperCamelCase__)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__)
snake_case__ = torch.Generator(device="""cpu""").manual_seed(0)
snake_case__ = """evil space-punk bird"""
snake_case__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""").resize((5_1_2, 5_1_2))
snake_case__ = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""").resize((5_1_2, 5_1_2))
snake_case__ = pipe(
UpperCamelCase__ , UpperCamelCase__ , control_image=UpperCamelCase__ , generator=UpperCamelCase__ , output_type="""np""" , num_inference_steps=5_0 , strength=0.6 , )
snake_case__ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
snake_case__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""")
assert np.abs(expected_image - image).max() < 9E-2
| 654 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
a__ = logging.get_logger(__name__)
a__ = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
a__ = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
a__ = {
"""jukebox""": 5_1_2,
}
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : str = VOCAB_FILES_NAMES
_lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : str = PRETRAINED_LYRIC_TOKENS_SIZES
_lowercase : Any = ['''input_ids''', '''attention_mask''']
def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=["v3", "v2", "v2"] , UpperCamelCase__ : List[str]=5_1_2 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : List[Any]="<|endoftext|>" , **UpperCamelCase__ : List[Any] , ):
'''simple docstring'''
snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else unk_token
super().__init__(
unk_token=UpperCamelCase__ , n_genres=UpperCamelCase__ , version=UpperCamelCase__ , max_n_lyric_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
snake_case__ = version
snake_case__ = max_n_lyric_tokens
snake_case__ = n_genres
with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
snake_case__ = json.load(UpperCamelCase__)
with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
snake_case__ = json.load(UpperCamelCase__)
with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
snake_case__ = json.load(UpperCamelCase__)
snake_case__ = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder) == 7_9:
snake_case__ = oov.replace(R"""\-'""" , R"""\-+'""")
snake_case__ = regex.compile(UpperCamelCase__)
snake_case__ = {v: k for k, v in self.artists_encoder.items()}
snake_case__ = {v: k for k, v in self.genres_encoder.items()}
snake_case__ = {v: k for k, v in self.lyrics_encoder.items()}
@property
def __magic_name__ ( self : List[str]):
'''simple docstring'''
return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
def __magic_name__ ( self : Union[str, Any]):
'''simple docstring'''
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder)
def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int):
'''simple docstring'''
snake_case__ = [self.artists_encoder.get(UpperCamelCase__ , 0) for artist in list_artists]
for genres in range(len(UpperCamelCase__)):
snake_case__ = [self.genres_encoder.get(UpperCamelCase__ , 0) for genre in list_genres[genres]]
snake_case__ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
snake_case__ = [[self.lyrics_encoder.get(UpperCamelCase__ , 0) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Optional[int]):
'''simple docstring'''
return list(UpperCamelCase__)
def __magic_name__ ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , **UpperCamelCase__ : List[str]):
'''simple docstring'''
snake_case__ , snake_case__ , snake_case__ = self.prepare_for_tokenization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
snake_case__ = self._tokenize(UpperCamelCase__)
return artist, genre, lyrics
def __magic_name__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool = False):
'''simple docstring'''
for idx in range(len(self.version)):
if self.version[idx] == "v3":
snake_case__ = artists[idx].lower()
snake_case__ = [genres[idx].lower()]
else:
snake_case__ = self._normalize(artists[idx]) + """.v2"""
snake_case__ = [
self._normalize(UpperCamelCase__) + """.v2""" for genre in genres[idx].split("""_""")
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""")
snake_case__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case__ = {vocab[index]: index + 1 for index in range(len(UpperCamelCase__))}
snake_case__ = 0
snake_case__ = len(UpperCamelCase__) + 1
snake_case__ = self.vocab
snake_case__ = {v: k for k, v in self.vocab.items()}
snake_case__ = """"""
else:
snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""")
snake_case__ = self._run_strip_accents(UpperCamelCase__)
snake_case__ = lyrics.replace("""\\""" , """\n""")
snake_case__ = self.out_of_vocab.sub("""""" , UpperCamelCase__), [], []
return artists, genres, lyrics
def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : str):
'''simple docstring'''
snake_case__ = unicodedata.normalize("""NFD""" , UpperCamelCase__)
snake_case__ = []
for char in text:
snake_case__ = unicodedata.category(UpperCamelCase__)
if cat == "Mn":
continue
output.append(UpperCamelCase__)
return "".join(UpperCamelCase__)
def __magic_name__ ( self : List[str] , UpperCamelCase__ : str):
'''simple docstring'''
snake_case__ = (
[chr(UpperCamelCase__) for i in range(ord("""a""") , ord("""z""") + 1)]
+ [chr(UpperCamelCase__) for i in range(ord("""A""") , ord("""Z""") + 1)]
+ [chr(UpperCamelCase__) for i in range(ord("""0""") , ord("""9""") + 1)]
+ ["""."""]
)
snake_case__ = frozenset(UpperCamelCase__)
snake_case__ = re.compile(R"""_+""")
snake_case__ = """""".join([c if c in accepted else """_""" for c in text.lower()])
snake_case__ = pattern.sub("""_""" , UpperCamelCase__).strip("""_""")
return text
def __magic_name__ ( self : List[Any] , UpperCamelCase__ : List[str]):
'''simple docstring'''
return " ".join(UpperCamelCase__)
def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : bool = False):
'''simple docstring'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__):
snake_case__ = TensorType(UpperCamelCase__)
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""")
import tensorflow as tf
snake_case__ = tf.constant
snake_case__ = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""")
import torch
snake_case__ = torch.tensor
snake_case__ = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""")
import jax.numpy as jnp # noqa: F811
snake_case__ = jnp.array
snake_case__ = _is_jax
else:
snake_case__ = np.asarray
snake_case__ = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case__ = [inputs]
if not is_tensor(UpperCamelCase__):
snake_case__ = as_tensor(UpperCamelCase__)
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""")
return inputs
def __call__( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any="" , UpperCamelCase__ : Dict="pt"):
'''simple docstring'''
snake_case__ = [0, 0, 0]
snake_case__ = [artist] * len(self.version)
snake_case__ = [genres] * len(self.version)
snake_case__ , snake_case__ , snake_case__ = self.tokenize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
snake_case__ , snake_case__ , snake_case__ = self._convert_token_to_id(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
snake_case__ = [-INFINITY] * len(full_tokens[-1])
snake_case__ = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCamelCase__)
for i in range(len(self.version))
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks})
def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
snake_case__ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""])
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCamelCase__))
snake_case__ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""])
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCamelCase__))
snake_case__ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""])
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCamelCase__))
return (artists_file, genres_file, lyrics_file)
def __magic_name__ ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]):
'''simple docstring'''
snake_case__ = self.artists_decoder.get(UpperCamelCase__)
snake_case__ = [self.genres_decoder.get(UpperCamelCase__) for genre in genres_index]
snake_case__ = [self.lyrics_decoder.get(UpperCamelCase__) for character in lyric_index]
return artist, genres, lyrics
| 654 | 1 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def _UpperCAmelCase ( *a : List[str] ):
with open(a , """r""" ) as fh:
fcntl.flock(a , fcntl.LOCK_EX )
try:
print(*a )
finally:
fcntl.flock(a , fcntl.LOCK_UN )
a__ = int(os.environ["""LOCAL_RANK"""])
torch.cuda.set_device(local_rank)
a__ = torch.device("""cuda""", local_rank)
a__ = socket.gethostname()
a__ = F'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a__ = dist.get_rank()
a__ = dist.get_world_size()
printflock(F'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(F'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(F'''{gpu} is broken''')
raise
| 654 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str]=sys.maxsize):
'''simple docstring'''
snake_case__ = """bilinear"""
snake_case__ = max_size
snake_case__ = short_edge_length
def __call__( self : List[str] , UpperCamelCase__ : Tuple):
'''simple docstring'''
snake_case__ = []
for img in imgs:
snake_case__ , snake_case__ = img.shape[:2]
# later: provide list and randomly choose index for resize
snake_case__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
if size == 0:
return img
snake_case__ = size * 1.0 / min(UpperCamelCase__ , UpperCamelCase__)
if h < w:
snake_case__ , snake_case__ = size, scale * w
else:
snake_case__ , snake_case__ = scale * h, size
if max(UpperCamelCase__ , UpperCamelCase__) > self.max_size:
snake_case__ = self.max_size * 1.0 / max(UpperCamelCase__ , UpperCamelCase__)
snake_case__ = newh * scale
snake_case__ = neww * scale
snake_case__ = int(neww + 0.5)
snake_case__ = int(newh + 0.5)
if img.dtype == np.uinta:
snake_case__ = Image.fromarray(UpperCamelCase__)
snake_case__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
snake_case__ = np.asarray(UpperCamelCase__)
else:
snake_case__ = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw
snake_case__ = nn.functional.interpolate(
UpperCamelCase__ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase__).squeeze(0)
img_augs.append(UpperCamelCase__)
return img_augs
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , UpperCamelCase__ : Optional[int]):
'''simple docstring'''
snake_case__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
snake_case__ = cfg.INPUT.FORMAT
snake_case__ = cfg.SIZE_DIVISIBILITY
snake_case__ = cfg.PAD_VALUE
snake_case__ = cfg.INPUT.MAX_SIZE_TEST
snake_case__ = cfg.MODEL.DEVICE
snake_case__ = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
snake_case__ = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
snake_case__ = lambda UpperCamelCase__: (x - self.pixel_mean) / self.pixel_std
def __magic_name__ ( self : Dict , UpperCamelCase__ : Dict):
'''simple docstring'''
snake_case__ = tuple(max(UpperCamelCase__) for s in zip(*[img.shape for img in images]))
snake_case__ = [im.shape[-2:] for im in images]
snake_case__ = [
nn.functional.pad(
UpperCamelCase__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCamelCase__ , UpperCamelCase__)
]
return torch.stack(UpperCamelCase__), torch.tensor(UpperCamelCase__)
def __call__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str=False):
'''simple docstring'''
with torch.no_grad():
if not isinstance(UpperCamelCase__ , UpperCamelCase__):
snake_case__ = [images]
if single_image:
assert len(UpperCamelCase__) == 1
for i in range(len(UpperCamelCase__)):
if isinstance(images[i] , torch.Tensor):
images.insert(UpperCamelCase__ , images.pop(UpperCamelCase__).to(self.device).float())
elif not isinstance(images[i] , torch.Tensor):
images.insert(
UpperCamelCase__ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase__) , input_format=self.input_format))
.to(self.device)
.float() , )
# resize smallest edge
snake_case__ = torch.tensor([im.shape[:2] for im in images])
snake_case__ = self.aug(UpperCamelCase__)
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
snake_case__ = [self.normalizer(UpperCamelCase__) for x in images]
# now pad them to do the following operations
snake_case__ , snake_case__ = self.pad(UpperCamelCase__)
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
snake_case__ = torch.true_divide(UpperCamelCase__ , UpperCamelCase__)
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _UpperCAmelCase ( a : Optional[Any] , a : Any ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _UpperCAmelCase ( a : Any , a : Tuple[int, int] ):
assert torch.isfinite(a ).all(), "Box tensor contains infinite or NaN!"
snake_case__ , snake_case__ = box_size
tensor[:, 0].clamp_(min=0 , max=a )
tensor[:, 1].clamp_(min=0 , max=a )
tensor[:, 2].clamp_(min=0 , max=a )
tensor[:, 3].clamp_(min=0 , max=a )
| 654 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a__ = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def _UpperCAmelCase ( a : List[str] , a : Union[str, Any] , a : List[str]=None , a : Dict=None , a : List[Any]=None , a : int=None , a : Tuple=None , a : int=None , ):
if attention_mask is None:
snake_case__ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
snake_case__ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
snake_case__ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case__ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case__ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _lowerCAmelCase :
    """Test helper: builds a toy Blenderbot config plus random encoder/decoder inputs and
    verifies that Flax decoding with a pre-allocated ``past_key_values`` cache matches
    one-shot (uncached) decoding.

    NOTE(review): the identifiers in this file look machine-mangled — every method is
    named ``__magic_name__`` (so at runtime only the last definition survives on the
    class), several signatures repeat the parameter name ``UpperCamelCase__`` (a
    SyntaxError as written), and chained ``snake_case__ = ...`` assignments clobber one
    another so later reads (``input_ids``, ``config``, ``model`` ...) are unbound.
    Restore the upstream names (FlaxBlenderbotModelTester) before relying on this class.
    """

    def __init__( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple=1_3 , UpperCamelCase__ : str=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=False , UpperCamelCase__ : Tuple=9_9 , UpperCamelCase__ : Optional[int]=1_6 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : int="gelu" , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[Any]=3_2 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : Tuple=0.02 , ):
        """Record the hyper-parameters that drive config and input construction."""
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = seq_length
        snake_case__ = is_training
        snake_case__ = use_labels
        snake_case__ = vocab_size
        snake_case__ = hidden_size
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = intermediate_size
        snake_case__ = hidden_act
        snake_case__ = hidden_dropout_prob
        snake_case__ = attention_probs_dropout_prob
        snake_case__ = max_position_embeddings
        snake_case__ = eos_token_id
        snake_case__ = pad_token_id
        snake_case__ = bos_token_id
        snake_case__ = initializer_range

    def __magic_name__ ( self : List[str]):
        """Create a small BlenderbotConfig and a matching random inputs dict."""
        snake_case__ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
        # append a column of EOS ids (2) so every row ends with the eos token
        snake_case__ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
        snake_case__ = shift_tokens_right(UpperCamelCase__ , 1 , 2)
        snake_case__ = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCamelCase__ , )
        snake_case__ = prepare_blenderbot_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        return config, inputs_dict

    def __magic_name__ ( self : Optional[int]):
        """Alias used by the common test mixin; returns (config, inputs_dict)."""
        snake_case__ , snake_case__ = self.prepare_config_and_inputs()
        return config, inputs_dict

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any]):
        """Decode step-by-step through an ``init_cache``'d past and compare the final-step
        logits against uncached one-shot decoding (max abs diff must be < 1e-3)."""
        snake_case__ = 2_0
        snake_case__ = model_class_name(UpperCamelCase__)
        snake_case__ = model.encode(inputs_dict["""input_ids"""])
        snake_case__ , snake_case__ = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        snake_case__ = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
        snake_case__ = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        # run all-but-last tokens through the cached decoder ...
        snake_case__ = model.decode(
            decoder_input_ids[:, :-1] , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , decoder_position_ids=UpperCamelCase__ , )
        snake_case__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
        # ... then feed only the final token using the returned past_key_values
        snake_case__ = model.decode(
            decoder_input_ids[:, -1:] , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase__ , )
        snake_case__ = model.decode(UpperCamelCase__ , UpperCamelCase__)
        # cached and uncached logits at the last position must agree
        snake_case__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[str]):
        """Same cache-consistency check, but with an explicit decoder attention mask
        zero-padded out to the maximum decoder length."""
        snake_case__ = 2_0
        snake_case__ = model_class_name(UpperCamelCase__)
        snake_case__ = model.encode(inputs_dict["""input_ids"""])
        snake_case__ , snake_case__ = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        # extend the decoder mask with zeros up to max_decoder_length
        snake_case__ = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ] , axis=-1 , )
        snake_case__ = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        snake_case__ = model.decode(
            decoder_input_ids[:, :-1] , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , decoder_position_ids=UpperCamelCase__ , )
        snake_case__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
        snake_case__ = model.decode(
            decoder_input_ids[:, -1:] , UpperCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase__ , decoder_position_ids=UpperCamelCase__ , )
        snake_case__ = model.decode(UpperCamelCase__ , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__)
        snake_case__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
    """Standalone Flax Blenderbot checks: LM-head logits shapes and ``shift_tokens_right``.

    NOTE(review): method names are all mangled to ``__magic_name__`` (later defs shadow
    earlier ones) and locals collapse onto ``snake_case__``, leaving reads such as
    ``input_ids``/``config``/``outputs`` unbound as written — compare with the upstream
    test_modeling_flax_blenderbot.py before trusting behavior.
    """

    # toy vocabulary size shared by the checks below
    _lowercase : List[str] = 99

    def __magic_name__ ( self : List[Any]):
        """Build a fixed 13x7 batch of ids (eos id 2, pad id 1) and a tiny config."""
        snake_case__ = np.array(
            [
                [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
                [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
                [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
                [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
                [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
                [5_5, 1_3, 1_6, 5_8, 5, 2, 1],  # note padding
                [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
                [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
                [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
                [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
                [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
                [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
                [7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ] , dtype=np.intaa , )
        snake_case__ = input_ids.shape[0]
        snake_case__ = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size

    def __magic_name__ ( self : int):
        """LM model logits must have shape (batch, seq_len, vocab_size)."""
        snake_case__ , snake_case__ , snake_case__ = self._get_config_and_data()
        snake_case__ = FlaxBlenderbotForConditionalGeneration(UpperCamelCase__)
        snake_case__ = lm_model(input_ids=UpperCamelCase__)
        snake_case__ = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["""logits"""].shape , UpperCamelCase__)

    def __magic_name__ ( self : List[Any]):
        """Same shape check when explicit decoder_input_ids are supplied."""
        snake_case__ = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
        snake_case__ = FlaxBlenderbotForConditionalGeneration(UpperCamelCase__)
        snake_case__ = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa)
        snake_case__ = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa)
        snake_case__ = lm_model(input_ids=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__)
        snake_case__ = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["""logits"""].shape , UpperCamelCase__)

    def __magic_name__ ( self : Tuple):
        """shift_tokens_right keeps the shape, lowers the pad count by one, and puts the
        decoder start token (2) in column 0."""
        snake_case__ = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa)
        snake_case__ = shift_tokens_right(UpperCamelCase__ , 1 , 2)
        snake_case__ = np.equal(UpperCamelCase__ , 1).astype(np.floataa).sum()
        snake_case__ = np.equal(UpperCamelCase__ , 1).astype(np.floataa).sum()
        self.assertEqual(shifted.shape , input_ids.shape)
        self.assertEqual(UpperCamelCase__ , n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class _lowerCAmelCase ( lowercase_ , unittest.TestCase , lowercase_ ):
    """Flax Blenderbot model test suite: cache-consistency, JIT encode/decode parity,
    from_pretrained smoke test, and a slow 3B generation check.

    NOTE(review): mangled identifiers — methods all share the name ``__magic_name__``
    (later defs shadow earlier ones) and ``FlaxBlenderbotModelTester`` is referenced but
    that class is itself renamed in this file; compare with the upstream
    test_modeling_flax_blenderbot.py.
    """

    _lowercase : str = True
    _lowercase : Dict = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    _lowercase : int = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def __magic_name__ ( self : Any):
        """Instantiate the shared model tester."""
        snake_case__ = FlaxBlenderbotModelTester(self)

    def __magic_name__ ( self : Any):
        """Cached vs. uncached decoding must agree for every model class."""
        snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)

    def __magic_name__ ( self : Optional[int]):
        """Same check with an explicit decoder attention mask."""
        snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)

    def __magic_name__ ( self : Any):
        """model.encode must produce identically-shaped outputs with and without jax.jit."""
        snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                snake_case__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__)
                snake_case__ = model_class(UpperCamelCase__)

                @jax.jit
                def encode_jitted(UpperCamelCase__ : List[Any] , UpperCamelCase__ : str=None , **UpperCamelCase__ : List[Any]):
                    return model.encode(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__)

                with self.subTest("""JIT Enabled"""):
                    snake_case__ = encode_jitted(**UpperCamelCase__).to_tuple()
                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        snake_case__ = encode_jitted(**UpperCamelCase__).to_tuple()
                self.assertEqual(len(UpperCamelCase__) , len(UpperCamelCase__))
                for jitted_output, output in zip(UpperCamelCase__ , UpperCamelCase__):
                    self.assertEqual(jitted_output.shape , output.shape)

    def __magic_name__ ( self : Any):
        """model.decode must produce identically-shaped outputs with and without jax.jit."""
        snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                snake_case__ = model_class(UpperCamelCase__)
                snake_case__ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""])
                snake_case__ = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any]):
                    return model.decode(
                        decoder_input_ids=UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , encoder_outputs=UpperCamelCase__ , )

                with self.subTest("""JIT Enabled"""):
                    snake_case__ = decode_jitted(**UpperCamelCase__).to_tuple()
                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        snake_case__ = decode_jitted(**UpperCamelCase__).to_tuple()
                self.assertEqual(len(UpperCamelCase__) , len(UpperCamelCase__))
                for jitted_output, output in zip(UpperCamelCase__ , UpperCamelCase__):
                    self.assertEqual(jitted_output.shape , output.shape)

    @slow
    def __magic_name__ ( self : List[str]):
        """from_pretrained smoke test on the 400M-distill checkpoint."""
        for model_class_name in self.all_model_classes:
            snake_case__ = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            snake_case__ = np.ones((1, 1)) * model.config.eos_token_id
            snake_case__ = model(UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)

    @unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""")
    @slow
    def __magic_name__ ( self : Optional[int]):
        """End-to-end generation check against a fixed reference string (3B model, GPU only)."""
        snake_case__ = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 1_5, """max_length""": 2_5}
        snake_case__ = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
        snake_case__ = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=UpperCamelCase__)
        snake_case__ = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""")
        snake_case__ = ["""Sam"""]
        snake_case__ = tokenizer(UpperCamelCase__ , return_tensors="""jax""")
        snake_case__ = model.generate(**UpperCamelCase__ , **UpperCamelCase__)
        snake_case__ = """Sam is a great name. It means \"sun\" in Gaelic."""
        snake_case__ = tokenizer.batch_decode(UpperCamelCase__ , **UpperCamelCase__)
        assert generated_txt[0].strip() == tgt_text
| 654 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config URL.
# NOTE(review): both module-level assignments here bind the same mangled name ``a__`` —
# the logger is immediately clobbered by the dict. Upstream these were presumably
# ``logger`` and ``WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP``; confirm before use.
a__ = {
    """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCAmelCase ( lowercase_ ):
    """Configuration class for WavLM models (mirrors transformers' ``WavLMConfig``).

    NOTE(review): inside ``__init__`` every assignment targets the mangled local
    ``snake_case__`` instead of ``self.<attr>``, so as written no configuration
    attribute is ever stored and reads such as ``self.conv_dim`` / ``self.conv_stride``
    below (and in the property) would fail. The right-hand sides still show which
    upstream keyword each line was meant to store.
    """

    # model_type identifier used by AutoConfig
    _lowercase : Dict = '''wavlm'''

    def __init__( self : Tuple , UpperCamelCase__ : str=3_2 , UpperCamelCase__ : Any=7_6_8 , UpperCamelCase__ : Any=1_2 , UpperCamelCase__ : Tuple=1_2 , UpperCamelCase__ : str=3_0_7_2 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Optional[int]=1E-5 , UpperCamelCase__ : Any="group" , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCamelCase__ : List[str]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__ : Dict=(1_0, 3, 3, 3, 3, 2, 2) , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[Any]=1_2_8 , UpperCamelCase__ : Optional[int]=1_6 , UpperCamelCase__ : Optional[Any]=3_2_0 , UpperCamelCase__ : Any=8_0_0 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=0.05 , UpperCamelCase__ : Optional[Any]=1_0 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Tuple=1_0 , UpperCamelCase__ : Optional[int]=3_2_0 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=1_0_0 , UpperCamelCase__ : Dict=2_5_6 , UpperCamelCase__ : Optional[int]=2_5_6 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Tuple="mean" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Union[str, Any]=2_5_6 , UpperCamelCase__ : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCamelCase__ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCamelCase__ : Any=(1, 2, 3, 1, 1) , UpperCamelCase__ : Dict=5_1_2 , UpperCamelCase__ : str=8_0 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : str=False , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : List[str] , ):
        """Populate the config and validate that the conv layer specs have equal lengths."""
        super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__)
        # transformer / feature-extractor hyper-parameters
        snake_case__ = hidden_size
        snake_case__ = feat_extract_norm
        snake_case__ = feat_extract_activation
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = conv_bias
        snake_case__ = num_buckets
        snake_case__ = max_bucket_distance
        snake_case__ = num_conv_pos_embeddings
        snake_case__ = num_conv_pos_embedding_groups
        snake_case__ = len(self.conv_dim)
        snake_case__ = num_hidden_layers
        snake_case__ = intermediate_size
        snake_case__ = hidden_act
        snake_case__ = num_attention_heads
        snake_case__ = hidden_dropout
        snake_case__ = attention_dropout
        snake_case__ = activation_dropout
        snake_case__ = feat_proj_dropout
        snake_case__ = final_dropout
        snake_case__ = layerdrop
        snake_case__ = layer_norm_eps
        snake_case__ = initializer_range
        snake_case__ = num_ctc_classes
        snake_case__ = vocab_size
        snake_case__ = do_stable_layer_norm
        snake_case__ = use_weighted_layer_sum
        snake_case__ = classifier_proj_size
        # the three conv specs must describe the same number of feature-extractor layers
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        snake_case__ = apply_spec_augment
        snake_case__ = mask_time_prob
        snake_case__ = mask_time_length
        snake_case__ = mask_time_min_masks
        snake_case__ = mask_feature_prob
        snake_case__ = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        snake_case__ = num_codevectors_per_group
        snake_case__ = num_codevector_groups
        snake_case__ = contrastive_logits_temperature
        snake_case__ = num_negatives
        snake_case__ = codevector_dim
        snake_case__ = proj_codevector_dim
        snake_case__ = diversity_loss_weight
        # ctc loss
        snake_case__ = ctc_loss_reduction
        snake_case__ = ctc_zero_infinity
        # adapter
        snake_case__ = add_adapter
        snake_case__ = adapter_kernel_size
        snake_case__ = adapter_stride
        snake_case__ = num_adapter_layers
        snake_case__ = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        snake_case__ = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = xvector_output_dim

    @property
    def __magic_name__ ( self : Optional[int]):
        """Total downsampling stride of the conv feature extractor (product of conv_stride)."""
        return functools.reduce(operator.mul , self.conv_stride , 1)
| 654 | 1 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a__ = logging.get_logger(__name__)

# Per-checkpoint model_max_length (in codepoints) for CANINE.
# NOTE(review): every constant below binds the same mangled name ``a__`` — upstream these
# were ``logger``, ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``, ``UNICODE_VOCAB_SIZE``,
# and the codepoint constants ``PAD``/``CLS``/``SEP``/``BOS``/``MASK``/``RESERVED``.
# As written, the names CLS/SEP/BOS/MASK/PAD/RESERVED used in SPECIAL_CODEPOINTS are
# therefore undefined; restore the upstream names before executing this module.
a__ = {
    """nielsr/canine-s""": 2_0_4_8,
}
# Unicode defines 1,114,112 total “codepoints”
a__ = 1_1_1_4_1_1_2
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
a__ = 0
a__ = 0xe000
a__ = 0xe001
a__ = 0xe002
a__ = 0xe003
a__ = 0xe004
# Maps special codepoints to human-readable names.
a__ = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
a__ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class _lowerCAmelCase ( lowercase_ ):
    """Character-level (CANINE) tokenizer: each Unicode character is a token and its
    codepoint is its id; special tokens live in the Private Use Area.

    NOTE(review): identifiers here are machine-mangled. Every keyword default below is
    ``chr(UpperCamelCase__)`` — a self-reference that would raise NameError at class
    definition time (upstream uses module constants: chr(CLS), chr(SEP), ...) — and the
    repeated parameter name ``UpperCamelCase__`` is itself a SyntaxError. Several bodies
    read names (``cls``, ``sep``, ``result``) that the collapsed ``snake_case__``
    assignments never bind. Treat this as a transcription of the upstream
    CanineTokenizer, not runnable code.
    """

    # per-checkpoint model_max_length table
    _lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self : Dict , UpperCamelCase__ : Optional[int]=chr(UpperCamelCase__) , UpperCamelCase__ : Optional[int]=chr(UpperCamelCase__) , UpperCamelCase__ : Tuple=chr(UpperCamelCase__) , UpperCamelCase__ : List[str]=chr(UpperCamelCase__) , UpperCamelCase__ : Any=chr(UpperCamelCase__) , UpperCamelCase__ : Tuple=chr(UpperCamelCase__) , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : List[Any]=2_0_4_8 , **UpperCamelCase__ : Any , ):
        """Wrap the special-token strings as AddedToken and build the special-codepoint maps."""
        snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else bos_token
        snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else eos_token
        snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else sep_token
        snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else cls_token
        snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else mask_token
        super().__init__(
            bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , model_max_length=UpperCamelCase__ , **UpperCamelCase__ , )
        # Creates a mapping for looking up the IDs of special symbols.
        snake_case__ = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            snake_case__ = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        snake_case__ = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        snake_case__ = UNICODE_VOCAB_SIZE
        snake_case__ = len(self._special_codepoints)

    @property
    def __magic_name__ ( self : Optional[Any]):
        """Vocabulary size: the full Unicode codepoint range."""
        return self._unicode_vocab_size

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : str):
        """Tokenize by splitting the text into individual characters."""
        return list(UpperCamelCase__)

    def __magic_name__ ( self : List[str] , UpperCamelCase__ : str):
        """Convert a single-character token to its codepoint id (raises on non-characters)."""
        try:
            return ord(UpperCamelCase__)
        except TypeError:
            raise ValueError(F'''invalid token: \'{token}\'''')

    def __magic_name__ ( self : str , UpperCamelCase__ : int):
        """Convert an id back to a token: special names for special codepoints, else chr()."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(UpperCamelCase__)
        except TypeError:
            raise ValueError(F'''invalid id: {index}''')

    def __magic_name__ ( self : Optional[Any] , UpperCamelCase__ : List[Any]):
        """Join character tokens back into a string."""
        return "".join(UpperCamelCase__)

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None):
        """Build model inputs: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        snake_case__ = [self.sep_token_id]
        snake_case__ = [self.cls_token_id]
        snake_case__ = cls + token_ids_a + sep
        if token_ids_a is not None:
            result += token_ids_a + sep
        return result

    def __magic_name__ ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__)
        snake_case__ = [1] + ([0] * len(UpperCamelCase__)) + [1]
        if token_ids_a is not None:
            result += ([0] * len(UpperCamelCase__)) + [1]
        return result

    def __magic_name__ ( self : Dict , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None):
        """Return token-type ids: 0 for the first sequence (incl. CLS/SEP), 1 for the second."""
        snake_case__ = [self.sep_token_id]
        snake_case__ = [self.cls_token_id]
        snake_case__ = len(cls + token_ids_a + sep) * [0]
        if token_ids_a is not None:
            result += len(token_ids_a + sep) * [1]
        return result

    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None):
        """CANINE has no vocabulary file to save; nothing is written."""
        return ()
| 654 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( lowercase_ ):
    """Unconditional image generation with the variance-exploding SDE (ScoreSdeVe) sampler.

    Runs a predictor–corrector loop: at each timestep the scheduler's ``step_correct``
    is applied ``correct_steps`` times, followed by one ``step_pred`` prediction step.

    Fixes vs. previous revision: the ``__init__`` parameters shared one mangled name (a
    SyntaxError) and every local collapsed onto ``snake_case__``, leaving ``model``,
    ``sample`` and friends unbound; locals are restored to coherent names.
    """

    # components registered with the pipeline
    _lowercase : UNetaDModel
    _lowercase : ScoreSdeVeScheduler

    def __init__( self : Union[str, Any] , unet : UNetaDModel , scheduler : ScoreSdeVeScheduler):
        """Register the UNet and the ScoreSdeVe scheduler with the pipeline base class."""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler)

    @torch.no_grad()
    def __call__( self : Union[str, Any] , batch_size : int = 1 , num_inference_steps : int = 2_0_0_0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs : List[str] , ):
        """Sample ``batch_size`` images.

        Args:
            batch_size: number of images to generate.
            num_inference_steps: number of SDE discretization steps.
            generator: torch generator(s) controlling the noise draws.
            output_type: "pil" to return PIL images, anything else for a numpy array.
            return_dict: if False, return a plain (images,) tuple.

        Returns:
            ImagePipelineOutput (or a tuple) holding the generated images.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # start from pure noise scaled to the scheduler's initial sigma
        sample = randn_tensor(shape , generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device)

            # correction step (Langevin corrector, applied correct_steps times)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample , sigma_t).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator).prev_sample

            # prediction step (reverse-SDE predictor)
            model_output = model(sample , sigma_t).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator)
            sample , sample_mean = output.prev_sample, output.prev_sample_mean

        # use the noise-free mean of the final step as the output image
        sample = sample_mean.clamp(0 , 1)
        sample = sample.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
| 654 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _UpperCAmelCase ( a : str ):
    """Derive a FocalNetConfig from a checkpoint name such as ``focalnet-tiny-lrf``.

    The name encodes the size variant (tiny/small/base/large/xlarge/huge), the
    receptive-field variant (srf vs lrf) and, for the big models, the focal-level
    variant (fl3/fl4). Label maps are fetched from the hub ``label-files`` dataset.

    Fixes vs. previous revision: all locals had collapsed onto one mangled name (so the
    config received the raw model name for every kwarg) and the id2label comprehension
    cast ``a`` (the model name) instead of the dict key.
    """
    depths = [2, 2, 6, 2] if """tiny""" in a else [2, 2, 18, 2]
    use_conv_embed = True if """large""" in a or """huge""" in a else False
    use_post_layernorm = True if """large""" in a or """huge""" in a else False
    use_layerscale = True if """large""" in a or """huge""" in a else False

    if "large" in a or "xlarge" in a or "huge" in a:
        if "fl3" in a:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in a:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in a or "small" in a or "base" in a:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in a:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in a:
        embed_dim = 96
    elif "small" in a:
        embed_dim = 96
    elif "base" in a:
        embed_dim = 128
    elif "large" in a:
        embed_dim = 192
    elif "xlarge" in a:
        embed_dim = 256
    elif "huge" in a:
        embed_dim = 352

    # set label information (22k labels for the big variants, 1k otherwise)
    repo_id = """huggingface/label-files"""
    if "large" in a or "huge" in a:
        filename = """imagenet-22k-id2label.json"""
    else:
        filename = """imagenet-1k-id2label.json"""

    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    # fix: cast the key, not the model name
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , idalabel=idalabel , labelaid=labelaid , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
def _UpperCAmelCase ( a : Union[str, Any] ):
if "patch_embed.proj" in name:
snake_case__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
snake_case__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
snake_case__ = """encoder.""" + name
if "encoder.layers" in name:
snake_case__ = name.replace("""encoder.layers""" , """encoder.stages""" )
if "downsample.proj" in name:
snake_case__ = name.replace("""downsample.proj""" , """downsample.projection""" )
if "blocks" in name:
snake_case__ = name.replace("""blocks""" , """layers""" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
snake_case__ = name.replace("""modulation.f""" , """modulation.projection_in""" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
snake_case__ = name.replace("""modulation.h""" , """modulation.projection_context""" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
snake_case__ = name.replace("""modulation.proj""" , """modulation.projection_out""" )
if name == "norm.weight":
snake_case__ = """layernorm.weight"""
if name == "norm.bias":
snake_case__ = """layernorm.bias"""
if "head" in name:
snake_case__ = name.replace("""head""" , """classifier""" )
else:
snake_case__ = """focalnet.""" + name
return name
def _UpperCAmelCase ( a : Optional[Any] , a : str , a : List[str]=False ):
    """Download a FocalNet checkpoint, convert it to HF format and verify the logits.

    NOTE(review): this def has three parameters all named ``a`` (a
    duplicate-argument SyntaxError) and binds every intermediate to
    ``snake_case__`` while later lines read the original identifiers
    (``model_name_to_url``, ``state_dict``, ``model`` ...).  An automated rename
    clobbered the original names (presumably ``model_name``,
    ``pytorch_dump_folder_path``, ``push_to_hub``); they must be restored before
    this script can run.
    """
    # fmt: off
    # Mapping from short model identifiers to the official checkpoint URLs.
    snake_case__ = {
        """focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""",
        """focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""",
        """focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""",
        """focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""",
        """focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""",
        """focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""",
        """focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""",
        """focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""",
        """focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""",
        """focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""",
    }
    # fmt: on
    snake_case__ = model_name_to_url[model_name]
    print("""Checkpoint URL: """ , a )
    snake_case__ = torch.hub.load_state_dict_from_url(a , map_location="""cpu""" )["""model"""]
    # rename keys
    for key in state_dict.copy().keys():
        snake_case__ = state_dict.pop(a )
        snake_case__ = val
    snake_case__ = get_focalnet_config(a )
    snake_case__ = FocalNetForImageClassification(a )
    model.eval()
    # load state dict
    model.load_state_dict(a )
    # verify conversion
    snake_case__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    snake_case__ = BitImageProcessor(
        do_resize=a , size={"""shortest_edge""": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=a , crop_size=224 , do_normalize=a , image_mean=a , image_std=a , )
    snake_case__ = Image.open(requests.get(a , stream=a ).raw )
    snake_case__ = processor(images=a , return_tensors="""pt""" )
    # Reference torchvision preprocessing — used to cross-check the HF processor.
    snake_case__ = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    snake_case__ = image_transforms(a ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , a , atol=1e-4 )
    snake_case__ = model(**a )
    snake_case__ = outputs.logits.argmax(-1 ).item()
    print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
    print("""First values of logits:""" , outputs.logits[0, :3] )
    # Golden logit slices per checkpoint; conversion must reproduce them to 1e-4.
    if model_name == "focalnet-tiny":
        snake_case__ = torch.tensor([0.2166, -0.4368, 0.2191] )
    elif model_name == "focalnet-tiny-lrf":
        snake_case__ = torch.tensor([1.1669, 0.0125, -0.1695] )
    elif model_name == "focalnet-small":
        snake_case__ = torch.tensor([0.4917, -0.0430, 0.1341] )
    elif model_name == "focalnet-small-lrf":
        snake_case__ = torch.tensor([-0.2588, -0.5342, -0.2331] )
    elif model_name == "focalnet-base":
        snake_case__ = torch.tensor([-0.1655, -0.4090, -0.1730] )
    elif model_name == "focalnet-base-lrf":
        snake_case__ = torch.tensor([0.5306, -0.0483, -0.3928] )
    assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(a )
        processor.save_pretrained(a )
    if push_to_hub:
        print(F'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(F'''{model_name}''' )
        processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    # CLI entry point for the FocalNet checkpoint conversion.
    # NOTE(review): the parser is bound to ``a__`` but used as ``parser``/``args``
    # below — the automated rename broke these references; restore one name.
    a__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""focalnet-tiny""",
        type=str,
        help="""Name of the FocalNet model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model and processor to the hub.""",
    )
    a__ = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 654 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """Fast tests for the DeepFloyd-IF inpainting super-resolution pipeline.

    Skipped on MPS.  NOTE(review): base classes were renamed to ``lowercase_``
    and several methods declare duplicate ``UpperCamelCase__`` parameters (a
    SyntaxError); original identifiers must be restored for these tests to run.
    """

    # Pipeline under test and its parameter sets (width/height are fixed for SR).
    _lowercase : Optional[int] = IFInpaintingSuperResolutionPipeline
    _lowercase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    _lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
    _lowercase : int = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def __magic_name__ ( self : Union[str, Any]):
        '''Return the dummy super-resolution components provided by the IF mixin.'''
        return self._get_superresolution_dummy_components()

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int]=0):
        '''Build deterministic dummy inputs (16x16 image, 32x32 original/mask).'''
        # mps does not support device-bound generators, so seed globally there.
        if str(UpperCamelCase__).startswith("""mps"""):
            snake_case__ = torch.manual_seed(UpperCamelCase__)
        else:
            snake_case__ = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__)
        snake_case__ = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        # NOTE(review): the dict below reads `image`/`original_image`/`mask_image`/
        # `generator`, but the assignments above were renamed to `snake_case__`.
        snake_case__ = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def __magic_name__ ( self : Dict):
        '''xformers attention must match the default path within 1e-3.'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def __magic_name__ ( self : int):
        '''Saving/reloading without optional components must be lossless.'''
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
    def __magic_name__ ( self : Optional[Any]):
        '''fp16 save/load round-trip; loose tolerance for half precision.'''
        super().test_save_load_floataa(expected_max_diff=1E-1)

    def __magic_name__ ( self : List[Any]):
        '''Attention slicing must not change outputs beyond 1e-2.'''
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def __magic_name__ ( self : Union[str, Any]):
        '''Local save/load round-trip.'''
        self._test_save_load_local()

    def __magic_name__ ( self : str):
        '''Batched and single inference must agree within 1e-2.'''
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 654 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( lowercase_ ):
    """Output of the prior transformer: the predicted CLIP image embedding."""

    # Predicted image embedding — presumably shape (batch, embedding_dim); TODO confirm.
    _lowercase : torch.FloatTensor
class _lowerCAmelCase ( lowercase_ , lowercase_ ):
    """Prior transformer predicting a CLIP image embedding from text conditioning
    (unCLIP/Kandinsky-style prior).

    NOTE(review): an automated rename bound every assignment to ``snake_case__``
    while later code reads the original attribute names (``self.time_proj``,
    ``self.proj_in``, ...).  The original identifiers must be restored for this
    module to be functional.
    """

    @register_to_config
    def __init__( self : Tuple , UpperCamelCase__ : int = 3_2 , UpperCamelCase__ : int = 6_4 , UpperCamelCase__ : int = 2_0 , UpperCamelCase__ : int = 7_6_8 , UpperCamelCase__ : Optional[Any]=7_7 , UpperCamelCase__ : str=4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = "linear" , UpperCamelCase__ : Optional[str] = "prd" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , ):
        '''Build projections, positional/causal embeddings and transformer blocks.'''
        super().__init__()
        snake_case__ = num_attention_heads
        snake_case__ = attention_head_dim
        snake_case__ = num_attention_heads * attention_head_dim
        snake_case__ = additional_embeddings
        # Fall back to sensible defaults when the optional dims are not given.
        snake_case__ = time_embed_dim or inner_dim
        snake_case__ = embedding_proj_dim or embedding_dim
        snake_case__ = clip_embed_dim or embedding_dim
        snake_case__ = Timesteps(UpperCamelCase__ , UpperCamelCase__ , 0)
        snake_case__ = TimestepEmbedding(UpperCamelCase__ , UpperCamelCase__ , out_dim=UpperCamelCase__ , act_fn=UpperCamelCase__)
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        if embedding_proj_norm_type is None:
            snake_case__ = None
        elif embedding_proj_norm_type == "layer":
            snake_case__ = nn.LayerNorm(UpperCamelCase__)
        else:
            raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''')
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        if encoder_hid_proj_type is None:
            snake_case__ = None
        elif encoder_hid_proj_type == "linear":
            snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        else:
            raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''')
        snake_case__ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase__))
        if added_emb_type == "prd":
            # Extra learned "prd" token appended to the sequence.
            snake_case__ = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase__))
        elif added_emb_type is None:
            snake_case__ = None
        else:
            raise ValueError(
                F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''')
        snake_case__ = nn.ModuleList(
            [
                BasicTransformerBlock(
                    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn="""gelu""" , attention_bias=UpperCamelCase__ , )
                for d in range(UpperCamelCase__)
            ])
        if norm_in_type == "layer":
            snake_case__ = nn.LayerNorm(UpperCamelCase__)
        elif norm_in_type is None:
            snake_case__ = None
        else:
            raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''')
        snake_case__ = nn.LayerNorm(UpperCamelCase__)
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        # Strictly-upper-triangular -10000 mask -> causal attention over tokens.
        snake_case__ = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0)
        causal_attention_mask.triu_(1)
        snake_case__ = causal_attention_mask[None, ...]
        self.register_buffer("""causal_attention_mask""" , UpperCamelCase__ , persistent=UpperCamelCase__)
        snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
        snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def __magic_name__ ( self : Optional[int]):
        '''Return a flat dict of every attention processor keyed by its module path.'''
        snake_case__ = {}
        def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor]):
            # Any submodule exposing `set_processor` contributes its processor.
            if hasattr(UpperCamelCase__ , """set_processor"""):
                snake_case__ = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        return processors

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        '''Set one processor for all attention layers, or a dict keyed by path.'''
        snake_case__ = len(self.attn_processors.keys())
        # A dict must cover every attention layer exactly.
        if isinstance(UpperCamelCase__ , UpperCamelCase__) and len(UpperCamelCase__) != count:
            raise ValueError(
                F'''A dict of processors was passed, but the number of processors {len(UpperCamelCase__)} does not match the'''
                F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''')
        def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Optional[int]):
            if hasattr(UpperCamelCase__ , """set_processor"""):
                if not isinstance(UpperCamelCase__ , UpperCamelCase__):
                    module.set_processor(UpperCamelCase__)
                else:
                    module.set_processor(processor.pop(F'''{name}.processor'''))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)
        for name, module in self.named_children():
            fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)

    def __magic_name__ ( self : Dict):
        '''Reset every attention layer to the default AttnProcessor.'''
        self.set_attn_processor(AttnProcessor())

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[torch.Tensor, float, int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.BoolTensor] = None , UpperCamelCase__ : bool = True , ):
        '''Forward pass: embed timestep + conditioning, run the causal transformer,
        and project the final token(s) back to the CLIP embedding space.'''
        snake_case__ = hidden_states.shape[0]
        snake_case__ = timestep
        # Normalize `timestep` to a 1-D tensor broadcast over the batch.
        if not torch.is_tensor(UpperCamelCase__):
            snake_case__ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device)
        elif torch.is_tensor(UpperCamelCase__) and len(timesteps.shape) == 0:
            snake_case__ = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        snake_case__ = timesteps * torch.ones(UpperCamelCase__ , dtype=timesteps.dtype , device=timesteps.device)
        snake_case__ = self.time_proj(UpperCamelCase__)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        snake_case__ = timesteps_projected.to(dtype=self.dtype)
        snake_case__ = self.time_embedding(UpperCamelCase__)
        if self.embedding_proj_norm is not None:
            snake_case__ = self.embedding_proj_norm(UpperCamelCase__)
        snake_case__ = self.embedding_proj(UpperCamelCase__)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            snake_case__ = self.encoder_hidden_states_proj(UpperCamelCase__)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""")
        snake_case__ = self.proj_in(UpperCamelCase__)
        snake_case__ = self.positional_embedding.to(hidden_states.dtype)
        snake_case__ = []
        snake_case__ = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(UpperCamelCase__)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        # Promote 2-D inputs to (batch, 1, dim) so they concatenate as tokens.
        if len(proj_embeddings.shape) == 2:
            snake_case__ = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            snake_case__ = hidden_states[:, None, :]
        snake_case__ = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            snake_case__ = self.prd_embedding.to(hidden_states.dtype).expand(UpperCamelCase__ , -1 , -1)
            additional_embeds.append(UpperCamelCase__)
        snake_case__ = torch.cat(
            UpperCamelCase__ , dim=1 , )
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        snake_case__ = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            snake_case__ = F.pad(
                UpperCamelCase__ , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        snake_case__ = hidden_states + positional_embeddings
        if attention_mask is not None:
            # Convert boolean mask to additive -10000 form and merge with causal mask.
            snake_case__ = (1 - attention_mask.to(hidden_states.dtype)) * -1_00_00.0
            snake_case__ = F.pad(UpperCamelCase__ , (0, self.additional_embeddings) , value=0.0)
            snake_case__ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            snake_case__ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0)
        if self.norm_in is not None:
            snake_case__ = self.norm_in(UpperCamelCase__)
        for block in self.transformer_blocks:
            snake_case__ = block(UpperCamelCase__ , attention_mask=UpperCamelCase__)
        snake_case__ = self.norm_out(UpperCamelCase__)
        # With a prd token, the prediction is read from the last position;
        # otherwise from everything after the conditioning tokens.
        if self.prd_embedding is not None:
            snake_case__ = hidden_states[:, -1]
        else:
            snake_case__ = hidden_states[:, additional_embeddings_len:]
        snake_case__ = self.proj_to_clip_embeddings(UpperCamelCase__)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase__)

    def __magic_name__ ( self : Any , UpperCamelCase__ : Any):
        '''Un-normalize prior latents back to the CLIP embedding statistics.'''
        snake_case__ = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 654 |
# Digit parity tables for the "reversible numbers" search (Project Euler 145).
# NOTE(review): both lists are bound to the same name ``a__`` — the second
# assignment overwrites the first, and the functions below expect them under
# the names EVEN_DIGITS and ODD_DIGITS respectively; restore those names.
a__ = [0, 2, 4, 6, 8]
a__ = [1, 3, 5, 7, 9]
def _UpperCAmelCase ( a : int , a : int , a : list[int] , a : int ):
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
snake_case__ = 0
for digit in range(10 ):
snake_case__ = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , a , a )
return result
snake_case__ = 0
for digita in range(10 ):
snake_case__ = digita
if (remainder + digita) % 2 == 0:
snake_case__ = ODD_DIGITS
else:
snake_case__ = EVEN_DIGITS
for digita in other_parity_digits:
snake_case__ = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , a , a , )
return result
def _UpperCAmelCase ( a : int = 9 ):
snake_case__ = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(a , 0 , [0] * length , a )
return result
if __name__ == "__main__":
    # NOTE(review): ``solution`` is never defined under that name in this file
    # (the entry point was renamed to ``_UpperCAmelCase``), so running this as a
    # script raises NameError — restore the original function name.
    print(F'''{solution() = }''')
| 654 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Make torch ops deterministic so the pixel-slice assertions below are reproducible.
enable_full_determinism()
class _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
    """Fast CPU tests for StableDiffusionInstructPix2Pix.

    NOTE(review): base classes were renamed to ``lowercase_`` and locals to
    ``snake_case__`` while later code reads the original names
    (``scheduler``/``vae``/``sd_pipe`` ...); restore the identifiers to run.
    """

    # Pipeline under test and its parameter sets.
    _lowercase : Tuple = StableDiffusionInstructPixaPixPipeline
    _lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
    _lowercase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    _lowercase : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
    _lowercase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def __magic_name__ ( self : Optional[Any]):
        '''Build tiny deterministic pipeline components (seeded with 0).'''
        torch.manual_seed(0)
        # 8 input channels: 4 latent + 4 conditioning-image channels.
        snake_case__ = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
        snake_case__ = PNDMScheduler(skip_prk_steps=UpperCamelCase__)
        torch.manual_seed(0)
        snake_case__ = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0)
        snake_case__ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        snake_case__ = CLIPTextModel(UpperCamelCase__)
        snake_case__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        snake_case__ = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : int=0):
        '''Build deterministic dummy call kwargs (32x32 RGB image + prompt).'''
        snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        snake_case__ = image.cpu().permute(0 , 2 , 3 , 1)[0]
        snake_case__ = Image.fromarray(np.uinta(UpperCamelCase__)).convert("""RGB""")
        # mps does not support device-bound generators.
        if str(UpperCamelCase__).startswith("""mps"""):
            snake_case__ = torch.manual_seed(UpperCamelCase__)
        else:
            snake_case__ = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__)
        snake_case__ = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """image_guidance_scale""": 1,
            """output_type""": """numpy""",
        }
        return inputs

    def __magic_name__ ( self : str):
        '''Default pipeline run must reproduce a golden 3x3 pixel slice.'''
        snake_case__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        snake_case__ = self.get_dummy_components()
        snake_case__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__)
        snake_case__ = sd_pipe.to(UpperCamelCase__)
        sd_pipe.set_progress_bar_config(disable=UpperCamelCase__)
        snake_case__ = self.get_dummy_inputs(UpperCamelCase__)
        snake_case__ = sd_pipe(**UpperCamelCase__).images
        snake_case__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        snake_case__ = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def __magic_name__ ( self : Union[str, Any]):
        '''Run with a negative prompt; output must match its golden slice.'''
        snake_case__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        snake_case__ = self.get_dummy_components()
        snake_case__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__)
        snake_case__ = sd_pipe.to(UpperCamelCase__)
        sd_pipe.set_progress_bar_config(disable=UpperCamelCase__)
        snake_case__ = self.get_dummy_inputs(UpperCamelCase__)
        snake_case__ = """french fries"""
        snake_case__ = sd_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__)
        snake_case__ = output.images
        snake_case__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        snake_case__ = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def __magic_name__ ( self : Tuple):
        '''Batched prompts/images (batch of 2) must match the golden slice.'''
        snake_case__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        snake_case__ = self.get_dummy_components()
        snake_case__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__)
        snake_case__ = sd_pipe.to(UpperCamelCase__)
        sd_pipe.set_progress_bar_config(disable=UpperCamelCase__)
        snake_case__ = self.get_dummy_inputs(UpperCamelCase__)
        snake_case__ = [inputs["""prompt"""]] * 2
        # Rebuild a normalized float tensor batch from the PIL input.
        snake_case__ = np.array(inputs["""image"""]).astype(np.floataa) / 2_55.0
        snake_case__ = torch.from_numpy(UpperCamelCase__).unsqueeze(0).to(UpperCamelCase__)
        snake_case__ = image / 2 + 0.5
        snake_case__ = image.permute(0 , 3 , 1 , 2)
        snake_case__ = image.repeat(2 , 1 , 1 , 1)
        snake_case__ = sd_pipe(**UpperCamelCase__).images
        snake_case__ = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 3_2, 3_2, 3)
        snake_case__ = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def __magic_name__ ( self : Optional[Any]):
        '''Euler-ancestral scheduler variant must match its golden slice.'''
        snake_case__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        snake_case__ = self.get_dummy_components()
        snake_case__ = EulerAncestralDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""")
        snake_case__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__)
        snake_case__ = sd_pipe.to(UpperCamelCase__)
        sd_pipe.set_progress_bar_config(disable=UpperCamelCase__)
        snake_case__ = self.get_dummy_inputs(UpperCamelCase__)
        snake_case__ = sd_pipe(**UpperCamelCase__).images
        snake_case__ = image[0, -3:, -3:, -1]
        snake_case__ = [round(UpperCamelCase__ , 4) for x in image_slice.flatten().tolist()]
        print(""",""".join([str(UpperCamelCase__) for x in slice]))
        assert image.shape == (1, 3_2, 3_2, 3)
        snake_case__ = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def __magic_name__ ( self : str):
        '''Batched vs. single inference must agree within 3e-3.'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)

    def __magic_name__ ( self : str):
        '''Passing pre-encoded latents must equal passing the raw image.'''
        snake_case__ = self.get_dummy_components()
        snake_case__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__)
        snake_case__ = VaeImageProcessor(do_resize=UpperCamelCase__ , do_normalize=UpperCamelCase__)
        snake_case__ = pipe.to(UpperCamelCase__)
        pipe.set_progress_bar_config(disable=UpperCamelCase__)
        snake_case__ = pipe(**self.get_dummy_inputs_by_type(UpperCamelCase__ , input_image_type="""pt"""))[0]
        snake_case__ = components["""vae"""]
        snake_case__ = self.get_dummy_inputs_by_type(UpperCamelCase__ , input_image_type="""pt""")
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                snake_case__ = vae.encode(inputs[image_param]).latent_dist.mode()
        snake_case__ = pipe(**UpperCamelCase__)[0]
        snake_case__ = np.abs(out - out_latents_inputs).max()
        self.assertLess(UpperCamelCase__ , 1E-4 , """passing latents as image input generate different result from passing image""")
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for StableDiffusionInstructPix2Pix
    (timbrooks/instruct-pix2pix).

    NOTE(review): locals were renamed to ``snake_case__`` while later code
    reads the original names (``pipe``, ``image_slice`` ...) — restore them.
    """

    def __magic_name__ ( self : Tuple):
        '''Free GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Optional[Any]=0):
        '''Build seeded call kwargs with the reference example image.'''
        snake_case__ = torch.manual_seed(UpperCamelCase__)
        snake_case__ = load_image(
            """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""")
        snake_case__ = {
            """prompt""": """turn him into a cyborg""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """image_guidance_scale""": 1.0,
            """output_type""": """numpy""",
        }
        return inputs

    def __magic_name__ ( self : List[str]):
        '''Default (PNDM) run must reproduce the golden 512x512 slice.'''
        snake_case__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase__)
        pipe.to(UpperCamelCase__)
        pipe.set_progress_bar_config(disable=UpperCamelCase__)
        pipe.enable_attention_slicing()
        snake_case__ = self.get_inputs()
        snake_case__ = pipe(**UpperCamelCase__).images
        snake_case__ = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        snake_case__ = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55])
        assert np.abs(expected_slice - image_slice).max() < 1E-3

    def __magic_name__ ( self : Union[str, Any]):
        '''LMS scheduler variant must reproduce its golden slice.'''
        snake_case__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase__)
        snake_case__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(UpperCamelCase__)
        pipe.set_progress_bar_config(disable=UpperCamelCase__)
        pipe.enable_attention_slicing()
        snake_case__ = self.get_inputs()
        snake_case__ = pipe(**UpperCamelCase__).images
        snake_case__ = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        snake_case__ = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01])
        assert np.abs(expected_slice - image_slice).max() < 1E-3

    def __magic_name__ ( self : Any):
        '''DDIM scheduler variant must reproduce its golden slice.'''
        snake_case__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase__)
        snake_case__ = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(UpperCamelCase__)
        pipe.set_progress_bar_config(disable=UpperCamelCase__)
        pipe.enable_attention_slicing()
        snake_case__ = self.get_inputs()
        snake_case__ = pipe(**UpperCamelCase__).images
        snake_case__ = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        snake_case__ = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53])
        assert np.abs(expected_slice - image_slice).max() < 1E-3

    def __magic_name__ ( self : str):
        '''The per-step callback must fire each step with correct latents.'''
        snake_case__ = 0
        def callback_fn(UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor) -> None:
            # Record that the callback fired and check golden latent slices
            # at steps 1 and 2.
            snake_case__ = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                snake_case__ = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 6_4, 6_4)
                snake_case__ = latents[0, -3:, -3:, -1]
                snake_case__ = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
            elif step == 2:
                snake_case__ = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 6_4, 6_4)
                snake_case__ = latents[0, -3:, -3:, -1]
                snake_case__ = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
        snake_case__ = False
        snake_case__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa)
        snake_case__ = pipe.to(UpperCamelCase__)
        pipe.set_progress_bar_config(disable=UpperCamelCase__)
        pipe.enable_attention_slicing()
        snake_case__ = self.get_inputs()
        pipe(**UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def __magic_name__ ( self : Union[str, Any]):
        '''With sequential CPU offload, peak GPU memory must stay < 2.2 GB.'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        snake_case__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa)
        snake_case__ = pipe.to(UpperCamelCase__)
        pipe.set_progress_bar_config(disable=UpperCamelCase__)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        snake_case__ = self.get_inputs()
        snake_case__ = pipe(**UpperCamelCase__)
        snake_case__ = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 1_0**9

    def __magic_name__ ( self : List[str]):
        '''Non-multiple-of-16/32 input (504x504) must still produce the golden slice.'''
        snake_case__ = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        snake_case__ = inputs["""image"""].resize((5_0_4, 5_0_4))
        snake_case__ = """timbrooks/instruct-pix2pix"""
        snake_case__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            UpperCamelCase__ , safety_checker=UpperCamelCase__ , )
        pipe.to(UpperCamelCase__)
        pipe.set_progress_bar_config(disable=UpperCamelCase__)
        pipe.enable_attention_slicing()
        snake_case__ = pipe(**UpperCamelCase__)
        snake_case__ = output.images[0]
        snake_case__ = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert image.shape == (5_0_4, 5_0_4, 3)
        snake_case__ = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-3
| 654 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
# Mapping from plain-English language names to NLLB-200 (FLORES-200) codes.
a__ = {
    """Acehnese Arabic""": """ace_Arab""",
    """Acehnese Latin""": """ace_Latn""",
    """Mesopotamian Arabic""": """acm_Arab""",
    """Ta'izzi-Adeni Arabic""": """acq_Arab""",
    """Tunisian Arabic""": """aeb_Arab""",
    """Afrikaans""": """afr_Latn""",
    """South Levantine Arabic""": """ajp_Arab""",
    """Akan""": """aka_Latn""",
    """Amharic""": """amh_Ethi""",
    """North Levantine Arabic""": """apc_Arab""",
    """Modern Standard Arabic""": """arb_Arab""",
    """Modern Standard Arabic Romanized""": """arb_Latn""",
    """Najdi Arabic""": """ars_Arab""",
    """Moroccan Arabic""": """ary_Arab""",
    """Egyptian Arabic""": """arz_Arab""",
    """Assamese""": """asm_Beng""",
    """Asturian""": """ast_Latn""",
    """Awadhi""": """awa_Deva""",
    """Central Aymara""": """ayr_Latn""",
    """South Azerbaijani""": """azb_Arab""",
    """North Azerbaijani""": """azj_Latn""",
    """Bashkir""": """bak_Cyrl""",
    """Bambara""": """bam_Latn""",
    """Balinese""": """ban_Latn""",
    """Belarusian""": """bel_Cyrl""",
    """Bemba""": """bem_Latn""",
    """Bengali""": """ben_Beng""",
    """Bhojpuri""": """bho_Deva""",
    """Banjar Arabic""": """bjn_Arab""",
    """Banjar Latin""": """bjn_Latn""",
    """Standard Tibetan""": """bod_Tibt""",
    """Bosnian""": """bos_Latn""",
    """Buginese""": """bug_Latn""",
    """Bulgarian""": """bul_Cyrl""",
    """Catalan""": """cat_Latn""",
    """Cebuano""": """ceb_Latn""",
    """Czech""": """ces_Latn""",
    """Chokwe""": """cjk_Latn""",
    """Central Kurdish""": """ckb_Arab""",
    """Crimean Tatar""": """crh_Latn""",
    """Welsh""": """cym_Latn""",
    """Danish""": """dan_Latn""",
    """German""": """deu_Latn""",
    """Southwestern Dinka""": """dik_Latn""",
    """Dyula""": """dyu_Latn""",
    """Dzongkha""": """dzo_Tibt""",
    """Greek""": """ell_Grek""",
    """English""": """eng_Latn""",
    """Esperanto""": """epo_Latn""",
    """Estonian""": """est_Latn""",
    """Basque""": """eus_Latn""",
    """Ewe""": """ewe_Latn""",
    """Faroese""": """fao_Latn""",
    """Fijian""": """fij_Latn""",
    """Finnish""": """fin_Latn""",
    """Fon""": """fon_Latn""",
    """French""": """fra_Latn""",
    """Friulian""": """fur_Latn""",
    """Nigerian Fulfulde""": """fuv_Latn""",
    """Scottish Gaelic""": """gla_Latn""",
    """Irish""": """gle_Latn""",
    """Galician""": """glg_Latn""",
    """Guarani""": """grn_Latn""",
    """Gujarati""": """guj_Gujr""",
    """Haitian Creole""": """hat_Latn""",
    """Hausa""": """hau_Latn""",
    """Hebrew""": """heb_Hebr""",
    """Hindi""": """hin_Deva""",
    """Chhattisgarhi""": """hne_Deva""",
    """Croatian""": """hrv_Latn""",
    """Hungarian""": """hun_Latn""",
    """Armenian""": """hye_Armn""",
    """Igbo""": """ibo_Latn""",
    """Ilocano""": """ilo_Latn""",
    """Indonesian""": """ind_Latn""",
    """Icelandic""": """isl_Latn""",
    """Italian""": """ita_Latn""",
    """Javanese""": """jav_Latn""",
    """Japanese""": """jpn_Jpan""",
    """Kabyle""": """kab_Latn""",
    """Jingpho""": """kac_Latn""",
    """Kamba""": """kam_Latn""",
    """Kannada""": """kan_Knda""",
    """Kashmiri Arabic""": """kas_Arab""",
    """Kashmiri Devanagari""": """kas_Deva""",
    """Georgian""": """kat_Geor""",
    """Central Kanuri Arabic""": """knc_Arab""",
    """Central Kanuri Latin""": """knc_Latn""",
    """Kazakh""": """kaz_Cyrl""",
    """Kabiyè""": """kbp_Latn""",
    """Kabuverdianu""": """kea_Latn""",
    """Khmer""": """khm_Khmr""",
    """Kikuyu""": """kik_Latn""",
    """Kinyarwanda""": """kin_Latn""",
    """Kyrgyz""": """kir_Cyrl""",
    """Kimbundu""": """kmb_Latn""",
    """Northern Kurdish""": """kmr_Latn""",
    """Kikongo""": """kon_Latn""",
    """Korean""": """kor_Hang""",
    """Lao""": """lao_Laoo""",
    """Ligurian""": """lij_Latn""",
    """Limburgish""": """lim_Latn""",
    """Lingala""": """lin_Latn""",
    """Lithuanian""": """lit_Latn""",
    """Lombard""": """lmo_Latn""",
    """Latgalian""": """ltg_Latn""",
    """Luxembourgish""": """ltz_Latn""",
    """Luba-Kasai""": """lua_Latn""",
    """Ganda""": """lug_Latn""",
    """Luo""": """luo_Latn""",
    """Mizo""": """lus_Latn""",
    """Standard Latvian""": """lvs_Latn""",
    """Magahi""": """mag_Deva""",
    """Maithili""": """mai_Deva""",
    """Malayalam""": """mal_Mlym""",
    """Marathi""": """mar_Deva""",
    """Minangkabau Arabic """: """min_Arab""",
    """Minangkabau Latin""": """min_Latn""",
    """Macedonian""": """mkd_Cyrl""",
    """Plateau Malagasy""": """plt_Latn""",
    """Maltese""": """mlt_Latn""",
    """Meitei Bengali""": """mni_Beng""",
    """Halh Mongolian""": """khk_Cyrl""",
    """Mossi""": """mos_Latn""",
    """Maori""": """mri_Latn""",
    """Burmese""": """mya_Mymr""",
    """Dutch""": """nld_Latn""",
    """Norwegian Nynorsk""": """nno_Latn""",
    """Norwegian Bokmål""": """nob_Latn""",
    """Nepali""": """npi_Deva""",
    """Northern Sotho""": """nso_Latn""",
    """Nuer""": """nus_Latn""",
    """Nyanja""": """nya_Latn""",
    """Occitan""": """oci_Latn""",
    """West Central Oromo""": """gaz_Latn""",
    """Odia""": """ory_Orya""",
    """Pangasinan""": """pag_Latn""",
    """Eastern Panjabi""": """pan_Guru""",
    """Papiamento""": """pap_Latn""",
    """Western Persian""": """pes_Arab""",
    """Polish""": """pol_Latn""",
    """Portuguese""": """por_Latn""",
    """Dari""": """prs_Arab""",
    """Southern Pashto""": """pbt_Arab""",
    """Ayacucho Quechua""": """quy_Latn""",
    """Romanian""": """ron_Latn""",
    """Rundi""": """run_Latn""",
    """Russian""": """rus_Cyrl""",
    """Sango""": """sag_Latn""",
    """Sanskrit""": """san_Deva""",
    """Santali""": """sat_Olck""",
    """Sicilian""": """scn_Latn""",
    """Shan""": """shn_Mymr""",
    """Sinhala""": """sin_Sinh""",
    """Slovak""": """slk_Latn""",
    """Slovenian""": """slv_Latn""",
    """Samoan""": """smo_Latn""",
    """Shona""": """sna_Latn""",
    """Sindhi""": """snd_Arab""",
    """Somali""": """som_Latn""",
    """Southern Sotho""": """sot_Latn""",
    """Spanish""": """spa_Latn""",
    """Tosk Albanian""": """als_Latn""",
    """Sardinian""": """srd_Latn""",
    """Serbian""": """srp_Cyrl""",
    """Swati""": """ssw_Latn""",
    """Sundanese""": """sun_Latn""",
    """Swedish""": """swe_Latn""",
    """Swahili""": """swh_Latn""",
    """Silesian""": """szl_Latn""",
    """Tamil""": """tam_Taml""",
    """Tatar""": """tat_Cyrl""",
    """Telugu""": """tel_Telu""",
    """Tajik""": """tgk_Cyrl""",
    """Tagalog""": """tgl_Latn""",
    """Thai""": """tha_Thai""",
    """Tigrinya""": """tir_Ethi""",
    """Tamasheq Latin""": """taq_Latn""",
    """Tamasheq Tifinagh""": """taq_Tfng""",
    """Tok Pisin""": """tpi_Latn""",
    """Tswana""": """tsn_Latn""",
    """Tsonga""": """tso_Latn""",
    """Turkmen""": """tuk_Latn""",
    """Tumbuka""": """tum_Latn""",
    """Turkish""": """tur_Latn""",
    """Twi""": """twi_Latn""",
    """Central Atlas Tamazight""": """tzm_Tfng""",
    """Uyghur""": """uig_Arab""",
    """Ukrainian""": """ukr_Cyrl""",
    """Umbundu""": """umb_Latn""",
    """Urdu""": """urd_Arab""",
    """Northern Uzbek""": """uzn_Latn""",
    """Venetian""": """vec_Latn""",
    """Vietnamese""": """vie_Latn""",
    """Waray""": """war_Latn""",
    """Wolof""": """wol_Latn""",
    """Xhosa""": """xho_Latn""",
    """Eastern Yiddish""": """ydd_Hebr""",
    """Yoruba""": """yor_Latn""",
    """Yue Chinese""": """yue_Hant""",
    """Chinese Simplified""": """zho_Hans""",
    """Chinese Traditional""": """zho_Hant""",
    """Standard Malay""": """zsm_Latn""",
    """Zulu""": """zul_Latn""",
}
# Backward-compatible alias: the translation-tool class below looks this table
# up under the name ``LANGUAGE_CODES``, which was otherwise undefined in this
# module (a NameError at class-creation time).
LANGUAGE_CODES = a__
class _lowerCAmelCase ( lowercase_ ):
    """Agent tool that translates text between NLLB-200 languages.

    NOTE(review): the class attributes below are all bound to the mangled name
    ``_lowercase``; the ``PipelineTool`` base presumably expects names such as
    ``default_checkpoint`` / ``description`` / ``name`` / ``pre_processor_class`` /
    ``model_class`` / ``lang_to_code`` / ``inputs`` / ``outputs`` — confirm and
    restore before use.
    """

    _lowercase : List[str] = '''facebook/nllb-200-distilled-600M'''
    _lowercase : List[Any] = (
        '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
        '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '''
        '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
    )
    _lowercase : Optional[int] = '''translator'''
    _lowercase : Optional[Any] = AutoTokenizer
    _lowercase : Dict = AutoModelForSeqaSeqLM
    _lowercase : List[str] = LANGUAGE_CODES
    _lowercase : Optional[Any] = ['''text''', '''text''', '''text''']
    _lowercase : Tuple = ['''text''']

    def encode( self , text : str , src_lang : str , tgt_lang : str):
        """Validate both language names and build tokenized model inputs.

        Fixes two defects in the previous version: all three parameters were
        named ``UpperCamelCase__`` (a SyntaxError), and all three methods of
        this class shared the name ``__magic_name__`` so only the last one
        survived class creation.  Raises ValueError for unsupported languages.
        """
        if src_lang not in self.lang_to_code:
            raise ValueError(F'''{src_lang} is not a supported language.''')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'''{tgt_lang} is not a supported language.''')
        src_code = self.lang_to_code[src_lang]
        tgt_code = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors="""pt""" , src_lang=src_code , tgt_lang=tgt_code)

    def forward( self , inputs : Dict):
        """Run generation on the encoded inputs."""
        return self.model.generate(**inputs)

    def decode( self , outputs : Dict):
        """Decode the first generated sequence back to text.

        NOTE(review): the previous body passed an undefined name as
        ``skip_special_tokens``; upstream uses ``skip_special_tokens=True``.
        """
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True)
| 654 | 1 |
from __future__ import annotations
def _UpperCAmelCase ( a : list[list[int]] ):
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(a ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(a ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 654 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _UpperCAmelCase ( a : Optional[int] ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
    # Only import torch when the environment provides it; the tests below are
    # gated by the require_torch* decorators anyway.
    import torch
    import torch.nn as nn
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : int , UpperCamelCase__ : nn.Module , UpperCamelCase__ : int):
'''simple docstring'''
super().__init__()
snake_case__ = module
snake_case__ = nn.Sequential(
nn.Linear(module.in_features , UpperCamelCase__ , bias=UpperCamelCase__) , nn.Linear(UpperCamelCase__ , module.out_features , bias=UpperCamelCase__) , )
snake_case__ = (2.0 / (5 * min(module.in_features , module.out_features))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase__)
nn.init.zeros_(self.adapter[1].weight)
self.adapter.to(module.weight.device)
def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str):
'''simple docstring'''
return self.module(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__) + self.adapter(UpperCamelCase__)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """Shared fixtures and constants for the 4-bit bitsandbytes tests below.

    NOTE(review): identifiers look machine-mangled — the constants are all
    bound to ``_lowercase`` while the body reads ``EXPECTED_OUTPUTS`` (a
    NameError at class-creation time) and ``setUp`` stores the tokenizer in a
    throw-away local instead of ``self.tokenizer``; restore original names
    before running.
    """
    # model under test
    _lowercase : Dict = '''bigscience/bloom-1b7'''
    # Constant values
    _lowercase : Any = 2.109_6595_5269_2574  # expected fp16 / 4-bit memory ratio
    _lowercase : Tuple = '''Hello my name is'''
    _lowercase : List[Any] = set()
    EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
    _lowercase : List[str] = 10  # max new tokens for generation checks
    def __magic_name__ ( self : Optional[int]):
        """Load the tokenizer for the model under test."""
        snake_case__ = AutoTokenizer.from_pretrained(self.model_name)
class _lowerCAmelCase ( lowercase_ ):
    """Core 4-bit loading tests: config round-trip, memory footprint, weight
    dtypes, generation quality, save/convert error paths and device moves.

    NOTE(review): every test method below is named ``__magic_name__`` (later
    defs shadow earlier ones) and ``snake_case__`` / ``UpperCamelCase__`` are
    mangled stand-ins for real locals/arguments, so this suite cannot run as
    written; restore the original identifiers first.
    """
    def __magic_name__ ( self : str):
        """Load an fp16 reference model and a 4-bit quantized model."""
        super().setUp()
        # Models and tokenizer
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="""auto""")
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
    def __magic_name__ ( self : Tuple):
        """Release both models and clear the CUDA cache."""
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : str):
        """The quantized model's config must expose and serialize its quantization_config."""
        snake_case__ = self.model_abit.config
        self.assertTrue(hasattr(UpperCamelCase__ , """quantization_config"""))
        snake_case__ = config.to_dict()
        snake_case__ = config.to_diff_dict()
        snake_case__ = config.to_json_string()
    def __magic_name__ ( self : Dict):
        """4-bit model should use ~2.1x less memory and 4-bit parameter class."""
        from bitsandbytes.nn import Paramsabit
        snake_case__ = self.model_fpaa.get_memory_footprint()
        snake_case__ = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE)
        snake_case__ = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Paramsabit)
    def __magic_name__ ( self : Optional[int]):
        """All quantized linear weights (except kept-in-fp32 modules) are packed uint8."""
        from transformers import TaPreTrainedModel
        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(UpperCamelCase__ , torch.nn.Linear):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta)
    def __magic_name__ ( self : Dict):
        """Generation from the 4-bit model matches one of the expected outputs."""
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
    def __magic_name__ ( self : str):
        """Same quality check when loading via an explicit BitsAndBytesConfig."""
        snake_case__ = BitsAndBytesConfig()
        snake_case__ = True
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = model_abit_from_config.generate(
            input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
    def __magic_name__ ( self : Optional[int]):
        """Saving a 4-bit model must raise."""
        with self.assertRaises(UpperCamelCase__), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(UpperCamelCase__)
    def __magic_name__ ( self : List[str]):
        """Passing both a quantization_config and load_in_4bit must raise."""
        snake_case__ = BitsAndBytesConfig()
        with self.assertRaises(UpperCamelCase__):
            snake_case__ = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
    def __magic_name__ ( self : List[Any]):
        """Device/dtype casts are forbidden on the 4-bit model but fine on fp16."""
        with self.assertRaises(UpperCamelCase__):
            # Tries with `str`
            self.model_abit.to("""cpu""")
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa)
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.to(torch.device("""cuda:0"""))
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = self.model_fpaa.to(torch.floataa)
        snake_case__ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.to("""cpu""")
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.half()
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.float()
    def __magic_name__ ( self : Dict):
        """T5 keeps its designated modules in fp32 even when loaded in 4-bit."""
        snake_case__ = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """T5-specific 4-bit tests (with and without the keep-in-fp32 module list).

    NOTE(review): identifiers are machine-mangled (``snake_case__`` locals,
    ``UpperCamelCase__`` arguments, repeated ``__magic_name__`` method names);
    restore the original names before running.
    """
    @classmethod
    def __magic_name__ ( cls : Optional[Any]):
        """Set up model names, tokenizer and a shared input prompt."""
        snake_case__ = """t5-small"""
        snake_case__ = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
        snake_case__ = AutoTokenizer.from_pretrained(cls.model_name)
        snake_case__ = """Translate in German: Hello, my dog is cute"""
    def __magic_name__ ( self : Optional[int]):
        """Clear the CUDA cache between tests."""
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : Any):
        """Loading/generation must work even with _keep_in_fp32_modules disabled."""
        from transformers import TaForConditionalGeneration
        snake_case__ = TaForConditionalGeneration._keep_in_fpaa_modules
        snake_case__ = None
        # test with `t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        # test with `flan-t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        snake_case__ = modules
    def __magic_name__ ( self : Union[str, Any]):
        """Decoder attention projections should be 4-bit Linear layers (bug regression)."""
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration
        # test with `t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit))
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        # test with `flan-t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
class _lowerCAmelCase ( lowercase_ ):
    """4-bit loading across model heads (base, sequence classification,
    causal LM, seq2seq) — output heads must stay regular fp parameters.

    NOTE(review): mangled identifiers as elsewhere in this file; ``setUp``
    binds throw-away locals instead of the ``self.*`` attributes that
    ``tearDown`` and the assertions read.
    """
    def __magic_name__ ( self : int):
        """Load each head variant of bloom-560m (and a t5 seq2seq) in 4-bit."""
        super().setUp()
        # model_name
        snake_case__ = """bigscience/bloom-560m"""
        snake_case__ = """t5-small"""
        # Different types of model
        snake_case__ = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # Sequence classification model
        snake_case__ = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # CausalLM model
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # Seq2seq model
        snake_case__ = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
    def __magic_name__ ( self : List[str]):
        """Release all loaded models and clear the CUDA cache."""
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : Union[str, Any]):
        """Quantized linears are 4-bit params; output heads remain nn.Parameter."""
        from bitsandbytes.nn import Paramsabit
        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class _lowerCAmelCase ( lowercase_ ):
    """End-to-end check of 4-bit loading through the text-generation pipeline.

    NOTE(review): mangled identifiers — the pipeline is bound to a throw-away
    local rather than ``self.pipe`` which ``tearDown``/the test read.
    """
    def __magic_name__ ( self : Tuple):
        """No extra fixtures beyond the shared base setUp."""
        super().setUp()
    def __magic_name__ ( self : int):
        """Release the pipeline and clear the CUDA cache."""
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : Tuple):
        """Build a 4-bit text-generation pipeline and verify its output quality."""
        snake_case__ = pipeline(
            """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        snake_case__ = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _lowerCAmelCase ( lowercase_ ):
    """4-bit loading with a balanced multi-GPU device map.

    NOTE(review): mangled identifiers as elsewhere in this file.
    """
    def __magic_name__ ( self : Union[str, Any]):
        """No extra fixtures beyond the shared base setUp."""
        super().setUp()
    def __magic_name__ ( self : int):
        """Model must be split over two GPUs and still generate correctly."""
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase__ , device_map="""balanced""")
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
        # Check that inference pass works on the model
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        # Second real batch
        snake_case__ = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
class _lowerCAmelCase ( lowercase_ ):
    """Adapter (LoRA-style) training on top of a 4-bit frozen base model:
    gradients must flow into the adapter layers but not the embeddings.

    NOTE(review): mangled identifiers — e.g. the freeze/cast loop binds
    throw-away locals where ``param.requires_grad`` / ``param.data`` were
    presumably assigned, and ``LoRALayer`` refers to the adapter class defined
    earlier in this file under a mangled name.
    """
    def __magic_name__ ( self : Any):
        """Use OPT-350m as the base model for the training test."""
        snake_case__ = """facebook/opt-350m"""
        super().setUp()
    def __magic_name__ ( self : Any):
        """Freeze a 4-bit base model, attach adapters, and check gradients."""
        if version.parse(importlib.metadata.version("""bitsandbytes""")) < version.parse("""0.37.0"""):
            return
        # Step 1: freeze all parameters
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__)
        self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
        for param in model.parameters():
            snake_case__ = False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                snake_case__ = param.data.to(torch.floataa)
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(UpperCamelCase__)):
                snake_case__ = LoRALayer(module.q_proj , rank=1_6)
                snake_case__ = LoRALayer(module.k_proj , rank=1_6)
                snake_case__ = LoRALayer(module.v_proj , rank=1_6)
        # Step 3: dummy batch
        snake_case__ = self.tokenizer("""Test batch """ , return_tensors="""pt""").to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            snake_case__ = model.forward(**UpperCamelCase__)
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(UpperCamelCase__ , UpperCamelCase__):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(UpperCamelCase__ , nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class _lowerCAmelCase ( lowercase_ ):
    """Re-run the mixed 4-bit tests against GPT-2 XL with its own memory ratio.

    NOTE(review): both class attributes are bound to the mangled name
    ``_lowercase``; upstream these override ``model_name`` and
    ``EXPECTED_RELATIVE_DIFFERENCE`` — confirm before use.
    """
    _lowercase : List[Any] = '''gpt2-xl'''
    # expected fp16 / 4-bit memory footprint ratio for GPT-2 XL
    _lowercase : Any = 3.3191_8548_5415_2187
| 654 | 1 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    """Regression tests for JukeboxTokenizer: tokenizing fixed artist/genre/
    lyrics metadata must reproduce the reference token tensors for the
    1b-lyrics and 5b-lyrics checkpoints.

    NOTE(review): identifiers are machine-mangled — the class attributes are
    bound to ``_lowercase`` while the tests read ``self.metas``, and the
    expected-output list is bound to ``snake_case__`` while the assertions
    read ``EXPECTED_OUTPUT``; both test methods also share the name
    ``__magic_name__`` (the later def shadows the earlier one).  Restore the
    original names before running.
    """
    _lowercase : Dict = JukeboxTokenizer
    _lowercase : str = {
        '''artist''': '''Zac Brown Band''',
        '''genres''': '''Country''',
        '''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
    }
    @require_torch
    def __magic_name__ ( self : Union[str, Any]):
        """1b-lyrics checkpoint: tokenized metas must equal the reference tensors."""
        import torch
        snake_case__ = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""")
        snake_case__ = tokenizer(**self.metas)["""input_ids"""]
        # fmt: off
        snake_case__ = [
            torch.tensor([[
                0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
                7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
                4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
                4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
                3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
                2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
                4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
                4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
                7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
                7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
                6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
                3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
                2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
                3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
                2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
                4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
                4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
                7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
                3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
                4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
                4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
                3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
                4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
                3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
                3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
                4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
                3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
                7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
                4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
                2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
                4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
                4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
                4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
                4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
                4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
                2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
                7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
                7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
                4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
                7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
                2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
                3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
                4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
                4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
                7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
                4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
                3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
                4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
                2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
                7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
                7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
                7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
                7_6, 7_6]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]]),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
    @require_torch
    def __magic_name__ ( self : int):
        """5b-lyrics checkpoint: tokenized metas must equal the reference tensors."""
        import torch
        snake_case__ = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""")
        snake_case__ = tokenizer(**self.metas)["""input_ids"""]
        # fmt: off
        snake_case__ = [
            torch.tensor([[
                0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
                3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
                3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
                4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
                7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
                7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
                2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
                3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
                3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
                7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
                7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
                4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
                7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
                7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
                7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
                6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
                4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
                4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
                3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
                3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
                4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
                4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
                4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
                3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
                3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
                4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
                3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
                3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
                4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
                2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
                3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
                3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
                3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
                3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
                4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
                3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
                1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
                1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
                4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
                4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
                4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
                4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
                2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
                3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
                7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
                3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
                7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
                4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
                7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
                4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
                7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
                2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
                7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
                7_7, 7_7, 7_7, 7_7, 7_7, 7_7]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
| 654 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# NOTE(review): an automated rename collapsed these constants' names; the code below
# reads them as LABEL_DIR / IMAGE_DIR / OUTPUT_DIR / FLIP_TYPE — confirm before use.
a__ = """"""  # directory holding the YOLO-format .txt label files — TODO confirm
a__ = """"""  # directory holding the matching .jpg images — TODO confirm
a__ = """"""  # output directory for the flipped images/labels — TODO confirm
a__ = 1 # (0 is vertical, 1 is horizontal)
def _UpperCAmelCase ( ):
    """Entry point: flip every dataset image (and its annotations) and write results.

    NOTE(review): the automated rename broke this function — `a`, `get_dataset`,
    `update_image_and_anno`, `random_chars`, `paths`, `new_annos` etc. are not
    defined under these names here, and the output paths `/{file_root}.jpg` write
    to the filesystem root instead of OUTPUT_DIR; not runnable as written.
    """
    snake_case__ , snake_case__ = get_dataset(a , a )
    print("""Processing...""" )
    snake_case__ , snake_case__ , snake_case__ = update_image_and_anno(a , a , a )
    for index, image in enumerate(a ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        snake_case__ = random_chars(32 )
        snake_case__ = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        snake_case__ = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        # NOTE(review): leading "/" makes this an absolute path — presumably should be file_root alone
        cva.imwrite(F'''/{file_root}.jpg''' , a , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F'''Success {index+1}/{len(a )} with {file_name}''' )
        snake_case__ = []
        for anno in new_annos[index]:
            snake_case__ = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(a )
        with open(F'''/{file_root}.txt''' , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def _UpperCAmelCase ( a : str , a : str ):
snake_case__ = []
snake_case__ = []
for label_file in glob.glob(os.path.join(a , """*.txt""" ) ):
snake_case__ = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(a ) as in_file:
snake_case__ = in_file.readlines()
snake_case__ = os.path.join(a , F'''{label_name}.jpg''' )
snake_case__ = []
for obj_list in obj_lists:
snake_case__ = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(a )
labels.append(a )
return img_paths, labels
def _UpperCAmelCase ( a : list , a : list , a : int = 1 ):
    """Flip each image and mirror its YOLO annotations accordingly.

    NOTE(review): the automated rename collapsed the parameters (originally
    img_list, anno_list, flip_type) into duplicate `a`s — invalid Python — and
    the `snake_case__` locals no longer match the names read below
    (new_imgs_list, new_annos_lists, path_list, img, img_annos, ...).
    flip_type: 1 = horizontal (mirror x centers), 0 = vertical (mirror y centers).
    """
    snake_case__ = []
    snake_case__ = []
    snake_case__ = []
    for idx in range(len(a ) ):
        snake_case__ = []
        snake_case__ = img_list[idx]
        path_list.append(a )
        snake_case__ = anno_list[idx]
        snake_case__ = cva.imread(a )
        if flip_type == 1:
            snake_case__ = cva.flip(a , a )
            for bbox in img_annos:
                # horizontal flip: x_center -> 1 - x_center
                snake_case__ = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            snake_case__ = cva.flip(a , a )
            for bbox in img_annos:
                # vertical flip: y_center -> 1 - y_center
                snake_case__ = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(a )
        new_imgs_list.append(a )
    return new_imgs_list, new_annos_lists, path_list
def _UpperCAmelCase ( a : int = 32 ):
assert number_char > 1, "The number of character should greater than 1"
snake_case__ = ascii_lowercase + digits
return "".join(random.choice(a ) for _ in range(a ) )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name — the automated rename turned
    # every function in this script into `_UpperCAmelCase`, so this entry point fails.
    main()
    print("""DONE ✅""")
| 654 | 1 |
def _UpperCAmelCase ( a : int ):
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
snake_case__ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
snake_case__ = 1
if upper_limit > 0:
snake_case__ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
    # Interactive driver: repeatedly read an upper limit and print the sequence.
    # NOTE(review): the automated rename broke this loop — the input is bound to
    # `a__` but tested as `N`, and `catalan_numbers` no longer exists under that name.
    print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
    print("""\n*** Enter -1 at any time to quit ***""")
    print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
    try:
        while True:
            a__ = int(input().strip())
            if N < 0:
                print("""\n********* Goodbye!! ************""")
                break
            else:
                print(F'''The Catalan numbers from 0 through {N} are:''')
                print(catalan_numbers(N))
                print("""Try another upper limit for the sequence: """, end="""""")
    except (NameError, ValueError):
        print("""\n********* Invalid input, goodbye! ************\n""")
    import doctest
    doctest.testmod()
| 654 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# NOTE(review): mangled names — read below as SPEED_TEST_N_EXAMPLES,
# RESULTS_BASEPATH / RESULTS_FILENAME, and the output RESULTS_FILE_PATH.
a__ = 5_0_0_0_0_0
a__ , a__ = os.path.split(__file__)
a__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def _UpperCAmelCase ( a : datasets.Dataset , **a : Tuple ):
    # Timed wrapper around Dataset.map; the @get_duration decorator returns the elapsed time.
    # NOTE(review): duplicate `a` parameter names (invalid Python) and the body reads
    # `dataset` — originally `def map(dataset, **kwargs)`.
    snake_case__ = dataset.map(**a )
@get_duration
def _UpperCAmelCase ( a : datasets.Dataset , **a : Optional[Any] ):
    # Timed wrapper around Dataset.filter — see the map wrapper above for the same
    # NOTE(review) about duplicate `a` parameters and the mangled `dataset` name.
    snake_case__ = dataset.filter(**a )
def _UpperCAmelCase ( ):
    """Benchmark Dataset.map / Dataset.filter under several output formats and
    dump the timings as JSON.

    NOTE(review): the automated rename left the calls pointing at the builtin
    ``map``/``filter`` instead of the timed wrappers above, and most argument
    names (`a`, `tmp_dir`, `features`, `tokenizer`, ...) are mangled — the
    function is not runnable as written; confirm against the original
    datasets benchmark script before relying on it.
    """
    snake_case__ = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        snake_case__ = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        snake_case__ = generate_example_dataset(
            os.path.join(a , """dataset.arrow""" ) , a , num_examples=a )
        snake_case__ = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=a )
        def tokenize(a : Union[str, Any] ):
            # tokenizes the "text" column; used as the map function below
            return tokenizer(examples["""text"""] )
        snake_case__ = map(a )
        snake_case__ = map(a , batched=a )
        snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""numpy""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""pandas""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        snake_case__ = map(a , function=a , batched=a )
        snake_case__ = filter(a )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(a , """wb""" ) as f:
            f.write(json.dumps(a ).encode("""utf-8""" ) )
if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
| 654 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# NOTE(review): mangled names — read below as NON_ALPHA (token-splitting regex),
# MIN_NUM_TOKENS (minimum tokens for a file to be hashed) and NUM_PERM (MinHash permutations).
a__ = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
a__ = 1_0
a__ = 2_5_6
def _UpperCAmelCase ( a : List[str] ):
    """Compute a MinHash of a token list, or None if it has fewer than MIN_NUM_TOKENS tokens.

    NOTE(review): mangled names — `MinHash(num_perm=a)` presumably took NUM_PERM,
    and `min_hash` is assigned as `snake_case__`; not runnable as written.
    """
    if len(a ) < MIN_NUM_TOKENS:
        return None
    snake_case__ = MinHash(num_perm=a )
    # hash the *set* of tokens so duplicates within a file don't matter
    for token in set(a ):
        min_hash.update(token.encode() )
    return min_hash
def _UpperCAmelCase ( a : str ):
    """Return the set of non-empty tokens of code string *a*, split on non-alphanumerics."""
    tokens = set()
    for piece in NON_ALPHA.split(a):
        if piece.strip():
            tokens.add(piece)
    return tokens
class _lowerCAmelCase :
    """MinHash-LSH index that clusters near-duplicate code files.

    NOTE(review): identifiers were mangled by an automated rewrite (the class is
    presumably `DuplicationIndex`); parameter names (`UpperCamelCase__`) no longer
    match the names the bodies read (`duplication_jaccard_threshold`,
    `close_duplicates`, ...) — not runnable as written.
    """
    def __init__( self : Tuple , *,
    UpperCamelCase__ : float = 0.85 , ):
        '''Create the LSH index with the given Jaccard threshold (default 0.85).'''
        snake_case__ = duplication_jaccard_threshold
        snake_case__ = NUM_PERM
        snake_case__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm)
        # maps a cluster's "base" key to the set of its near-duplicates
        snake_case__ = defaultdict(UpperCamelCase__)
    def __magic_name__ ( self : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : MinHash):
        '''Insert (code_key, min_hash); attach the key to an existing cluster when LSH finds close duplicates.'''
        snake_case__ = self._index.query(UpperCamelCase__)
        if code_key in self._index.keys:
            print(F'''Duplicate key {code_key}''')
            return
        self._index.insert(UpperCamelCase__ , UpperCamelCase__)
        if len(UpperCamelCase__) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(UpperCamelCase__)
                    break
            else:
                # no existing cluster matched: start one at the first close duplicate
                self._duplicate_clusters[close_duplicates[0]].add(UpperCamelCase__)
    def __magic_name__ ( self : List[str]):
        '''Return all clusters as lists of {"base_index", "repo_name", "path"} dicts.'''
        snake_case__ = []
        for base, duplicates in self._duplicate_clusters.items():
            snake_case__ = [base] + list(UpperCamelCase__)
            # reformat the cluster to be a list of dict
            snake_case__ = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(UpperCamelCase__)
        return duplicate_clusters
    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : Optional[Any]):
        '''Dump the duplicate clusters to the given JSON file path.'''
        snake_case__ = self.get_duplicate_clusters()
        with open(UpperCamelCase__ , """w""") as f:
            json.dump(UpperCamelCase__ , UpperCamelCase__)
def _UpperCAmelCase ( a : List[Any] ):
    """Worker: unpack an (index, row) element and return ((index, repo, path), MinHash).

    Returns None implicitly when the file is too small to hash.
    NOTE(review): mangled names — `index`/`data` are read but assigned as `snake_case__`.
    """
    snake_case__ , snake_case__ = element
    snake_case__ = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def _UpperCAmelCase ( a : Type[Dataset] ):
    """Yield (key, MinHash) pairs for a dataset, computing hashes in a process pool."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(a , max_queue_size=1_0000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def _UpperCAmelCase ( a : Type[Dataset] , a : float ):
    """Build duplicate clusters for a dataset at a given Jaccard threshold.

    NOTE(review): the parameters were collapsed into duplicate `a`s (invalid
    Python) and `di` is assigned as `snake_case__` — originally
    (dataset_iterator, jaccard_threshold).
    """
    snake_case__ = DuplicationIndex(duplication_jaccard_threshold=a )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(a ) ) , max_queue_size=100 ) ):
        di.add(a , a )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def _UpperCAmelCase ( a: str , b: str ) -> float:
    """Return the Jaccard similarity between the token sets of two code strings.

    Fix: the automated rename had collapsed both parameters into a duplicate
    ``a`` (a SyntaxError) and left the locals (`tokensa`) undefined; restored
    to two distinct parameters and token sets.
    """
    tokens_a = get_tokens(a)
    tokens_b = get_tokens(b)
    # |intersection| / |union|
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)
a__ = None  # NOTE(review): the `_shared_dataset` global set by find_extremes() below
def _UpperCAmelCase ( a : Optional[Any] , a : str ):
    """Reduce a duplicate cluster to its 'extremes' (mutually dissimilar members).

    Each cluster element gains a "copies" count of how many members it subsumes.
    NOTE(review): duplicate `a` parameters (invalid Python; originally
    (cluster, jaccard_threshold)) and `snake_case__` locals that no longer match
    the names read below (extremes, code1/code2).
    """
    snake_case__ = []
    for elementa in cluster:
        snake_case__ = _shared_dataset[elementa["""base_index"""]]["""content"""]
        for elementa in extremes:
            snake_case__ = _shared_dataset[elementa["""base_index"""]]["""content"""]
            if jaccard_similarity(a , a ) >= jaccard_threshold:
                # close enough to an existing extreme: count it as a copy
                elementa["copies"] += 1
                break
        else:
            # dissimilar from every extreme so far: becomes a new extreme
            snake_case__ = 1
            extremes.append(a )
    return extremes
def _UpperCAmelCase ( a : Optional[int] , a : Union[str, Any] , a : Optional[int] ):
    """Run _find_cluster_extremes_shared over all clusters in a process pool.

    Publishes the dataset through the `_shared_dataset` global so worker
    processes can read file contents by index.
    NOTE(review): triplicate `a` parameters (invalid Python; originally
    (cluster_list, dataset, jaccard_threshold)).
    """
    global _shared_dataset
    snake_case__ = dataset
    snake_case__ = []
    snake_case__ = partial(_find_cluster_extremes_shared , jaccard_threshold=a )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                a , a , ) , total=len(a ) , ):
            extremes_list.append(a )
    return extremes_list
def _UpperCAmelCase ( a : Type[Dataset] , a : float = 0.85 ):
    """Deduplicate a dataset: keep only the 'extreme' member of each duplicate cluster.

    Returns (filtered_dataset, duplicate_clusters), where every cluster element is
    annotated with "is_extreme" and, for extremes, the number of "copies" removed.
    NOTE(review): duplicate `a` parameters (invalid Python; originally
    (dataset, jaccard_threshold)) and mangled `snake_case__` locals throughout.
    """
    snake_case__ = make_duplicate_clusters(a , a )
    snake_case__ = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    snake_case__ = {}
    snake_case__ = find_extremes(a , a , a )
    for extremes in extremes_clusters:
        for element in extremes:
            snake_case__ = element
    # indices that belong to a cluster but are not its kept extreme
    snake_case__ = duplicate_indices - set(extreme_dict.keys() )
    snake_case__ = dataset.filter(lambda a , a : idx not in remove_indices , with_indices=a )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            snake_case__ = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                snake_case__ = extreme_dict[element["""base_index"""]]["""copies"""]
    print(F'''Original dataset size: {len(a )}''' )
    print(F'''Number of duplicate clusters: {len(a )}''' )
    print(F'''Files in duplicate cluster: {len(a )}''' )
    print(F'''Unique files in duplicate cluster: {len(a )}''' )
    print(F'''Filtered dataset size: {len(a )}''' )
    return ds_filter, duplicate_clusters
| 654 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def _UpperCAmelCase ( a : List[str] , a : Any=False ):
snake_case__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def _UpperCAmelCase ( a : int , a : List[Any] , a : Union[str, Any]=False ):
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ = """"""
else:
snake_case__ = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[
: config.hidden_size, :
]
snake_case__ = in_proj_bias[: config.hidden_size]
snake_case__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( a : Dict , a : Union[str, Any] , a : int ):
snake_case__ = dct.pop(a )
snake_case__ = val
def _UpperCAmelCase ( ):
    """Download the standard COCO sanity-check image (val2017 #39769) as a PIL Image.

    NOTE(review): mangled names — `im`/`url` are read but assigned as
    `snake_case__`/`a`; not runnable as written. Requires network access.
    """
    snake_case__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    snake_case__ = Image.open(requests.get(a , stream=a ).raw )
    return im
@torch.no_grad()
def _UpperCAmelCase ( a : List[str] , a : Tuple ):
    """Convert a timm DeiT checkpoint to the HF format, verify logits, and save it.

    NOTE(review): the automated rename collapsed both parameters (originally
    `deit_name`, `pytorch_dump_folder_path`) into duplicate `a`s — invalid
    Python — and the `snake_case__` locals no longer match the names the body
    reads (config, idalabel, deit_name, timm_model, ...); not runnable as written.
    """
    snake_case__ = DeiTConfig()
    # all deit models have fine-tuned heads
    snake_case__ = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    snake_case__ = 1000
    snake_case__ = """huggingface/label-files"""
    snake_case__ = """imagenet-1k-id2label.json"""
    snake_case__ = json.load(open(hf_hub_download(a , a , repo_type="""dataset""" ) , """r""" ) )
    snake_case__ = {int(a ): v for k, v in idalabel.items()}
    snake_case__ = idalabel
    snake_case__ = {v: k for k, v in idalabel.items()}
    # patch_size and image_size are encoded in the timm model name suffix
    snake_case__ = int(deit_name[-6:-4] )
    snake_case__ = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("""tiny""" ):
        snake_case__ = 192
        snake_case__ = 768
        snake_case__ = 12
        snake_case__ = 3
    elif deit_name[9:].startswith("""small""" ):
        snake_case__ = 384
        snake_case__ = 1536
        snake_case__ = 12
        snake_case__ = 6
    if deit_name[9:].startswith("""base""" ):
        pass
    elif deit_name[4:].startswith("""large""" ):
        snake_case__ = 1024
        snake_case__ = 4096
        snake_case__ = 24
        snake_case__ = 16
    # load original model from timm
    snake_case__ = timm.create_model(a , pretrained=a )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    snake_case__ = timm_model.state_dict()
    snake_case__ = create_rename_keys(a , a )
    for src, dest in rename_keys:
        rename_key(a , a , a )
    read_in_q_k_v(a , a , a )
    # load HuggingFace model
    snake_case__ = DeiTForImageClassificationWithTeacher(a ).eval()
    model.load_state_dict(a )
    # Check outputs on an image, prepared by DeiTImageProcessor
    snake_case__ = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    snake_case__ = DeiTImageProcessor(size=a , crop_size=config.image_size )
    snake_case__ = image_processor(images=prepare_img() , return_tensors="""pt""" )
    snake_case__ = encoding["""pixel_values"""]
    snake_case__ = model(a )
    snake_case__ = timm_model(a )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(a , outputs.logits , atol=1e-3 )
    Path(a ).mkdir(exist_ok=a )
    print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(a )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(a )
if __name__ == "__main__":
    # CLI entry point.
    # NOTE(review): mangled names — the parser/args are assigned to `a__` but read
    # as `parser`/`args`, and `convert_deit_checkpoint` no longer exists under that name.
    a__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--deit_name""",
        default="""vit_deit_base_distilled_patch16_224""",
        type=str,
        help="""Name of the DeiT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    a__ = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 654 | 1 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
# NOTE(review): mangled names — read below as TRANSFORMERS_PATH, PATH_TO_DOCS and REPO_PATH.
a__ = """src/transformers"""
a__ = """docs/source/en"""
a__ = """."""
def _UpperCAmelCase ( a : Tuple , a : Optional[int] , a : int ):
with open(a , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case__ = f.readlines()
# Find the start prompt.
snake_case__ = 0
while not lines[start_index].startswith(a ):
start_index += 1
start_index += 1
snake_case__ = start_index
while not lines[end_index].startswith(a ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
a__ = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
# NOTE(review): mangled names — read below as _re_tf_models / _re_flax_models / _re_pt_models.
a__ = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
a__ = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so it needs to be in an else branch after the two previous regexes.
a__ = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
a__ = direct_transformers_import(TRANSFORMERS_PATH)
def _UpperCAmelCase ( a : Optional[int] ):
snake_case__ = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , a )
return [m.group(0 ) for m in matches]
def _UpperCAmelCase ( a : Dict , a : List[Any] ):
snake_case__ = 2 if text == """✅""" or text == """❌""" else len(a )
snake_case__ = (width - text_length) // 2
snake_case__ = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def _UpperCAmelCase ( ):
    """Build the markdown support table (slow/fast tokenizer, PT/TF/Flax) for all models.

    NOTE(review): the `snake_case__` locals no longer match the names the body
    reads (config_maping_names, slow_tokenizers, lookup_dict, ...), and the
    `defaultdict(a)` / `dir(a)` arguments were mangled (presumably `bool` and the
    transformers module); not runnable as written.
    """
    snake_case__ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    snake_case__ = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    snake_case__ = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    snake_case__ = collections.defaultdict(a )
    snake_case__ = collections.defaultdict(a )
    snake_case__ = collections.defaultdict(a )
    snake_case__ = collections.defaultdict(a )
    snake_case__ = collections.defaultdict(a )
    # Let's lookup through all transformers object (once).
    for attr_name in dir(a ):
        snake_case__ = None
        if attr_name.endswith("""Tokenizer""" ):
            snake_case__ = slow_tokenizers
            snake_case__ = attr_name[:-9]
        elif attr_name.endswith("""TokenizerFast""" ):
            snake_case__ = fast_tokenizers
            snake_case__ = attr_name[:-13]
        elif _re_tf_models.match(a ) is not None:
            snake_case__ = tf_models
            snake_case__ = _re_tf_models.match(a ).groups()[0]
        elif _re_flax_models.match(a ) is not None:
            snake_case__ = flax_models
            snake_case__ = _re_flax_models.match(a ).groups()[0]
        elif _re_pt_models.match(a ) is not None:
            snake_case__ = pt_models
            snake_case__ = _re_pt_models.match(a ).groups()[0]
        if lookup_dict is not None:
            while len(a ) > 0:
                if attr_name in model_name_to_prefix.values():
                    snake_case__ = True
                    break
                # Try again after removing the last word in the name
                snake_case__ = """""".join(camel_case_split(a )[:-1] )
    # Let's build that table!
    snake_case__ = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    snake_case__ = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    snake_case__ = [len(a ) + 2 for c in columns]
    snake_case__ = max([len(a ) for name in model_names] ) + 2
    # Build the table per se
    snake_case__ = """|""" + """|""".join([_center_text(a , a ) for c, w in zip(a , a )] ) + """|\n"""
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
    snake_case__ = {True: """✅""", False: """❌"""}
    for name in model_names:
        snake_case__ = model_name_to_prefix[name]
        snake_case__ = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(a , a ) for l, w in zip(a , a )] ) + "|\n"
    return table
def _UpperCAmelCase ( a : Any=False ):
    """Compare the model table in docs `index.md` with the auto-generated one; rewrite it if `overwrite`.

    Raises:
        ValueError: if the table is stale and overwrite is falsy.
    NOTE(review): mangled names — the parameter (originally `overwrite`) and the
    unpacked locals (current_table, start_index, end_index, lines) are read under
    their original names; not runnable as written.
    """
    snake_case__ , snake_case__ , snake_case__ , snake_case__ = _find_text_in_file(
        filename=os.path.join(a , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , )
    snake_case__ = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(a , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
    # NOTE(review): mangled names — parser/args assigned to `a__` but read as
    # `parser`/`args`; `check_model_table` no longer exists under that name.
    a__ = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    a__ = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
| 654 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( lowercase_ ):
    """Output container for the prior transformer below.

    NOTE(review): names were mangled by an automated rewrite — presumably
    `PriorTransformerOutput(BaseOutput)` holding `predicted_image_embedding`.
    """
    # The predicted CLIP image embedding tensor — shape presumably
    # (batch_size, embedding_dim); TODO confirm against the forward pass below.
    _lowercase : torch.FloatTensor
class _lowerCAmelCase ( lowercase_ , lowercase_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Tuple , UpperCamelCase__ : int = 3_2 , UpperCamelCase__ : int = 6_4 , UpperCamelCase__ : int = 2_0 , UpperCamelCase__ : int = 7_6_8 , UpperCamelCase__ : Optional[Any]=7_7 , UpperCamelCase__ : str=4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = "linear" , UpperCamelCase__ : Optional[str] = "prd" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
snake_case__ = num_attention_heads
snake_case__ = attention_head_dim
snake_case__ = num_attention_heads * attention_head_dim
snake_case__ = additional_embeddings
snake_case__ = time_embed_dim or inner_dim
snake_case__ = embedding_proj_dim or embedding_dim
snake_case__ = clip_embed_dim or embedding_dim
snake_case__ = Timesteps(UpperCamelCase__ , UpperCamelCase__ , 0)
snake_case__ = TimestepEmbedding(UpperCamelCase__ , UpperCamelCase__ , out_dim=UpperCamelCase__ , act_fn=UpperCamelCase__)
snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
if embedding_proj_norm_type is None:
snake_case__ = None
elif embedding_proj_norm_type == "layer":
snake_case__ = nn.LayerNorm(UpperCamelCase__)
else:
raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''')
snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
if encoder_hid_proj_type is None:
snake_case__ = None
elif encoder_hid_proj_type == "linear":
snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
else:
raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''')
snake_case__ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase__))
if added_emb_type == "prd":
snake_case__ = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase__))
elif added_emb_type is None:
snake_case__ = None
else:
raise ValueError(
F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''')
snake_case__ = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn="""gelu""" , attention_bias=UpperCamelCase__ , )
for d in range(UpperCamelCase__)
])
if norm_in_type == "layer":
snake_case__ = nn.LayerNorm(UpperCamelCase__)
elif norm_in_type is None:
snake_case__ = None
else:
raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''')
snake_case__ = nn.LayerNorm(UpperCamelCase__)
snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
snake_case__ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0)
causal_attention_mask.triu_(1)
snake_case__ = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , UpperCamelCase__ , persistent=UpperCamelCase__)
snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
snake_case__ = {}
def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor]):
if hasattr(UpperCamelCase__ , """set_processor"""):
snake_case__ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
return processors
def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
'''simple docstring'''
snake_case__ = len(self.attn_processors.keys())
if isinstance(UpperCamelCase__ , UpperCamelCase__) and len(UpperCamelCase__) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(UpperCamelCase__)} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''')
def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Optional[int]):
if hasattr(UpperCamelCase__ , """set_processor"""):
if not isinstance(UpperCamelCase__ , UpperCamelCase__):
module.set_processor(UpperCamelCase__)
else:
module.set_processor(processor.pop(F'''{name}.processor'''))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
def __magic_name__ ( self : Dict):
'''simple docstring'''
self.set_attn_processor(AttnProcessor())
def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[torch.Tensor, float, int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.BoolTensor] = None , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
snake_case__ = hidden_states.shape[0]
snake_case__ = timestep
if not torch.is_tensor(UpperCamelCase__):
snake_case__ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device)
elif torch.is_tensor(UpperCamelCase__) and len(timesteps.shape) == 0:
snake_case__ = timesteps[None].to(hidden_states.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
snake_case__ = timesteps * torch.ones(UpperCamelCase__ , dtype=timesteps.dtype , device=timesteps.device)
snake_case__ = self.time_proj(UpperCamelCase__)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
snake_case__ = timesteps_projected.to(dtype=self.dtype)
snake_case__ = self.time_embedding(UpperCamelCase__)
if self.embedding_proj_norm is not None:
snake_case__ = self.embedding_proj_norm(UpperCamelCase__)
snake_case__ = self.embedding_proj(UpperCamelCase__)
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
snake_case__ = self.encoder_hidden_states_proj(UpperCamelCase__)
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""")
snake_case__ = self.proj_in(UpperCamelCase__)
snake_case__ = self.positional_embedding.to(hidden_states.dtype)
snake_case__ = []
snake_case__ = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase__)
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape) == 2:
snake_case__ = proj_embeddings[:, None, :]
if len(hidden_states.shape) == 2:
snake_case__ = hidden_states[:, None, :]
snake_case__ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
snake_case__ = self.prd_embedding.to(hidden_states.dtype).expand(UpperCamelCase__ , -1 , -1)
additional_embeds.append(UpperCamelCase__)
snake_case__ = torch.cat(
UpperCamelCase__ , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
snake_case__ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
snake_case__ = F.pad(
UpperCamelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
snake_case__ = hidden_states + positional_embeddings
if attention_mask is not None:
snake_case__ = (1 - attention_mask.to(hidden_states.dtype)) * -1_00_00.0
snake_case__ = F.pad(UpperCamelCase__ , (0, self.additional_embeddings) , value=0.0)
snake_case__ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
snake_case__ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0)
if self.norm_in is not None:
snake_case__ = self.norm_in(UpperCamelCase__)
for block in self.transformer_blocks:
snake_case__ = block(UpperCamelCase__ , attention_mask=UpperCamelCase__)
snake_case__ = self.norm_out(UpperCamelCase__)
if self.prd_embedding is not None:
snake_case__ = hidden_states[:, -1]
else:
snake_case__ = hidden_states[:, additional_embeddings_len:]
snake_case__ = self.proj_to_clip_embeddings(UpperCamelCase__)
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase__)
def __magic_name__ ( self : Any , UpperCamelCase__ : Any):
    """Map latents from the prior's normalized space back into CLIP embedding space.

    Inverts the (x - mean) / std normalization by computing
    ``x * clip_std + clip_mean`` with the statistics stored on the model.
    """
    # NOTE(review): the original assigned the result to a throwaway name and then
    # returned the undefined name `prior_latents` (NameError). Bind the result
    # before returning; the parameter name is kept for caller compatibility.
    prior_latents = (UpperCamelCase__ * self.clip_std) + self.clip_mean
    return prior_latents
| 654 | 1 |
import os
def solution(a: str = "matrix.txt") -> int:
    """Project Euler 81: minimal path sum from the top-left to the bottom-right
    of the square matrix stored in file ``a``, moving only right and down.

    The file holds one comma-separated row of integers per line. The path is
    resolved via ``os.path.join(os.path.dirname(a), a)`` — for a bare filename
    this is relative to the current working directory.

    Returns:
        The minimal path sum (an int).
    """
    with open(os.path.join(os.path.dirname(a), a)) as in_file:
        data = in_file.read()
    # NOTE(review): the original converted `int(a)` (the filename!) per cell and
    # assigned every intermediate to a throwaway name; both restored here. The
    # redundant first `dp` initialization (immediately overwritten) is dropped.
    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])  # assumes a square n x n matrix, like the original
    dp = [[0 for _ in range(n)] for _ in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]  # top row: reachable only from the left
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]  # first column: reachable only from above
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]


# Backward-compatible alias for the previous (mangled) public name.
_UpperCAmelCase = solution


if __name__ == "__main__":
    # NOTE(review): previously called the then-undefined `solution()` (NameError).
    print(F'''{solution() = }''')
| 654 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# Tokenizer checkpoints exercised by the TF tokenizer tests below (referenced
# by name in setUp). NOTE(review): both constants were previously bound to the
# same mangled name `a__` (the second clobbered the first) while the tests read
# `TOKENIZER_CHECKPOINTS`, which was undefined.
TOKENIZER_CHECKPOINTS = ["""gpt2"""]
# Checkpoint whose config is used to build the tiny LM in ModelToSave.
TINY_MODEL_CHECKPOINT = """gpt2"""
# Backward-compatible alias for the mangled name previously bound here.
a__ = TINY_MODEL_CHECKPOINT
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
    """Bundles an in-graph TF tokenizer with a tiny (untrained) GPT-2 LM so the
    pair can be exported together via ``tf.saved_model.save`` and served
    end-to-end through the ``serving`` signature.
    """

    def __init__( self : List[Any] , tokenizer : int):
        '''Store the tokenizer and build a GPT-2 model from the checkpoint config.'''
        super().__init__()
        # NOTE(review): the original assigned to a throwaway name while later
        # lines read `tokenizer`/`config` (NameError); bindings restored. The
        # keyword call `ModelToSave(tokenizer=...)` below grounds the parameter
        # name. `a__` is this module's tiny GPT-2 checkpoint name ("gpt2").
        self.tokenizer = tokenizer
        config = AutoConfig.from_pretrained(a__)
        self.model = TFGPTaLMHeadModel.from_config(config)

    @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text"""),))
    def serving( self : Tuple , text : int):
        '''Tokenize `text` and return the LM logits (the exported signature).'''
        # NOTE(review): method renamed from the mangled `__magic_name__` — the
        # tests below call `model.serving(...)` and export it as a signature.
        tokenized = self.tokenizer(text)
        input_ids_dense = tokenized["""input_ids"""].to_tensor()
        # TODO(review): `tf.intaa` is a mangled dtype identifier (int16/32/64?) —
        # confirm the intended dtype; as written this raises AttributeError.
        input_mask = tf.cast(input_ids_dense > 0 , tf.intaa)
        # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
        outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask)["""logits"""]
        return outputs


# Backward-compatible alias: the tests below construct `ModelToSave(...)`,
# which was otherwise undefined in this file.
ModelToSave = _lowerCAmelCase
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests that the in-graph TFGPT2Tokenizer matches the slow GPT2Tokenizer and
    survives tf.function compilation, SavedModel export, and config round-trips.

    NOTE(review): this block is machine-mangled — every method is named
    `__magic_name__` (later definitions shadow earlier ones) and every
    assignment target was rewritten to `snake_case__`, while later lines still
    read the original variable names (e.g. `python_outputs`, `model`,
    `saved_model_dir`), so the code raises NameError as written. Code is left
    byte-identical; comments describe the intended behavior.
    """

    def __magic_name__ ( self : List[Any]):
        '''setUp: one slow tokenizer and one TF tokenizer per checkpoint, plus
        sentences covering ASCII, control characters, CJK and accented text.'''
        super().setUp()
        # Intended bindings: self.tokenizers / self.tf_tokenizers — mangled below.
        snake_case__ = [GPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        snake_case__ = [TFGPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        # Intended binding: self.test_sentences.
        snake_case__ = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        # Pairs of sentences (each with its reverse-ordered partner).
        snake_case__ = list(zip(self.test_sentences , self.test_sentences[::-1]))

    def __magic_name__ ( self : Optional[int]):
        '''Slow-vs-TF tokenizer parity: same shapes and same token ids per key.'''
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                # Intended bindings: python_outputs / tf_outputs.
                snake_case__ = tokenizer([test_inputs] , return_tensors="""tf""")
                snake_case__ = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    snake_case__ = python_outputs[key].numpy()
                    snake_case__ = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase__ , tf.intaa) == tf_outputs_values))

    @slow
    def __magic_name__ ( self : Optional[int]):
        '''Graph mode: tf.function-compiled tokenizer must match eager output.'''
        for tf_tokenizer in self.tf_tokenizers:
            # Intended binding: compiled_tokenizer.
            snake_case__ = tf.function(UpperCamelCase__)
            for test_inputs in self.test_sentences:
                snake_case__ = tf.constant(UpperCamelCase__)
                snake_case__ = compiled_tokenizer(UpperCamelCase__)
                snake_case__ = tf_tokenizer(UpperCamelCase__)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def __magic_name__ ( self : Optional[Any]):
        '''SavedModel round-trip: export tokenizer+model, reload, compare logits.'''
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = ModelToSave(tokenizer=UpperCamelCase__)
            snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
            snake_case__ = model.serving(UpperCamelCase__)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                snake_case__ = Path(UpperCamelCase__) / """saved.model"""
                tf.saved_model.save(UpperCamelCase__ , UpperCamelCase__ , signatures={"""serving_default""": model.serving})
                snake_case__ = tf.saved_model.load(UpperCamelCase__)
                snake_case__ = loaded_model.signatures["""serving_default"""](UpperCamelCase__)["""output_0"""]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def __magic_name__ ( self : Tuple):
        '''Config round-trip: from_config(get_config()) reproduces the output.'''
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
            snake_case__ = tf_tokenizer(UpperCamelCase__)  # Build model with some sample inputs
            snake_case__ = tf_tokenizer.get_config()
            snake_case__ = TFGPTaTokenizer.from_config(UpperCamelCase__)
            snake_case__ = model_from_config(UpperCamelCase__)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def __magic_name__ ( self : Dict):
        '''max_length handling: output width must equal the requested max_length.'''
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            snake_case__ = 1_2_3_1_2_3
            for max_length in [3, 5, 1_0_2_4]:
                snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
                snake_case__ = tf_tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__)
                snake_case__ = out["""input_ids"""].numpy().shape[1]
                assert out_length == max_length
| 654 | 1 |
def prime_sieve_eratosthenes(num: int) -> list:
    """Return all primes in ``[2, num]`` using the sieve of Eratosthenes.

    Raises:
        ValueError: if ``num`` is not a positive integer.
    """
    # NOTE(review): the original declared the parameter as `a` while the body
    # read `num`, and assigned the sieve/counter to throwaway names (NameError);
    # names restored from the body's own references.
    if num <= 0:
        raise ValueError("""Input must be a positive integer""")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # p is prime: cross out its multiples starting at p*p (smaller
            # multiples were already crossed out by smaller primes).
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


# Backward-compatible alias for the previous (mangled) public name.
_UpperCAmelCase = prime_sieve_eratosthenes


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): previously the input was bound to a mangled name while the
    # print read the then-undefined `prime_sieve_eratosthenes(user_num)`.
    user_num = int(input("""Enter a positive integer: """).strip())
    print(prime_sieve_eratosthenes(user_num))
| 654 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( lowercase_ ):
    """Scheduler test suite for diffusers' IPNDMScheduler: config/save-load
    round-trips, forward-kwarg checks, and a deterministic full denoising loop.

    NOTE(review): machine-mangled block — every method is named `__magic_name__`
    (later definitions shadow earlier ones), two signatures declare duplicate
    parameter names (a SyntaxError as written), and assignment targets were
    rewritten to `snake_case__` while later lines still read the original names
    (`config`, `scheduler`, `sample`, ...). Code left byte-identical; comments
    describe the intended behavior.
    """

    # Intended names: scheduler_classes and forward_default_kwargs — both were
    # mangled to `_lowercase`, so the second binding clobbers the first.
    _lowercase : int = (IPNDMScheduler,)
    _lowercase : int = (('''num_inference_steps''', 50),)

    def __magic_name__ ( self : Any , **UpperCamelCase__ : Tuple):
        '''get_scheduler_config: base config dict, overridable via kwargs.'''
        snake_case__ = {"""num_train_timesteps""": 1_0_0_0}
        config.update(**UpperCamelCase__)
        return config

    def __magic_name__ ( self : int , UpperCamelCase__ : Dict=0 , **UpperCamelCase__ : int):
        '''check_over_configs: a save_config/from_pretrained round-trip must not
        change scheduler.step outputs (past residuals seeded identically).'''
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
            snake_case__ = scheduler_class(**UpperCamelCase__)
            scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residuals
            snake_case__ = dummy_past_residuals[:]
            if time_step is None:
                # Pick a timestep from the middle of the schedule.
                snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__)
                snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
                new_scheduler.set_timesteps(UpperCamelCase__)
                # copy over dummy past residuals
                snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            # Step twice: IPNDM is a multistep method, so the second step also
            # depends on the residual history.
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def __magic_name__ ( self : List[Any]):
        '''Intentionally disabled base-class check (no-op override).'''
        pass

    def __magic_name__ ( self : Tuple , UpperCamelCase__ : Union[str, Any]=0 , **UpperCamelCase__ : Tuple):
        '''check_over_forward: same round-trip check, but with the default
        scheduler config and forwarded kwargs.'''
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**UpperCamelCase__)
            scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residuals (must be after setting timesteps)
            snake_case__ = dummy_past_residuals[:]
            if time_step is None:
                snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__)
                snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(UpperCamelCase__)
                # copy over dummy past residual (must be after setting timesteps)
                snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def __magic_name__ ( self : Union[str, Any] , **UpperCamelCase__ : Dict):
        '''full_loop: run a complete 10-step denoising loop and return the sample.

        NOTE(review): the timestep loop deliberately runs twice — presumably to
        exercise the scheduler's warm multistep state (`ets`); confirm against
        the upstream IPNDM test before changing.'''
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
        snake_case__ = scheduler_class(**UpperCamelCase__)
        snake_case__ = 1_0
        snake_case__ = self.dummy_model()
        snake_case__ = self.dummy_sample_deter
        scheduler.set_timesteps(UpperCamelCase__)
        for i, t in enumerate(scheduler.timesteps):
            snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
        return sample

    def __magic_name__ ( self : Optional[int]):
        '''Shape test: consecutive step outputs keep the input sample's shape.'''
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**UpperCamelCase__)
            snake_case__ = self.dummy_sample
            snake_case__ = 0.1 * sample
            if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps"""):
                scheduler.set_timesteps(UpperCamelCase__)
            elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps"""):
                snake_case__ = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.timesteps[5]
            snake_case__ = scheduler.timesteps[6]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            self.assertEqual(output_a.shape , sample.shape)
            self.assertEqual(output_a.shape , output_a.shape)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            self.assertEqual(output_a.shape , sample.shape)
            self.assertEqual(output_a.shape , output_a.shape)

    def __magic_name__ ( self : Union[str, Any]):
        '''Sweep num_train_timesteps through the config round-trip check.'''
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__)

    def __magic_name__ ( self : Dict):
        '''Sweep (time_step, num_inference_steps) through the forward check.'''
        for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0]):
            self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__)

    def __magic_name__ ( self : List[str]):
        '''Regression test: mean magnitude of the full-loop result is pinned to a
        known value within an absolute tolerance of 10.'''
        snake_case__ = self.full_loop()
        snake_case__ = torch.mean(torch.abs(UpperCamelCase__))
        assert abs(result_mean.item() - 2_5_4_0_5_2_9) < 1_0
| 654 | 1 |
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of ``n`` (trial division)."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        # Whatever remains after dividing out all factors <= sqrt(n) is prime.
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors of ``num``."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Return True when every element of ``iterable`` is equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Return the first run of ``n`` consecutive integers that each have exactly
    ``n`` distinct prime factors (Project Euler 47)."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end: the list is uniform iff every
        # member has exactly n distinct prime factors.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    """First member of the first run of ``n`` consecutive integers with ``n``
    distinct prime factors each (e.g. 644 for n=3, 134043 for n=4).

    NOTE(review): the five functions in this chunk were all mangled to the same
    name (each shadowing the previous) while their bodies called the original
    names, so nothing here was runnable; the referenced names are restored.
    """
    results = run(n)
    return results[0] if len(results) else None


# Backward-compatible alias for the mangled public name (previously the last of
# the colliding definitions, i.e. this solution function).
_UpperCAmelCase = solution


if __name__ == "__main__":
    print(solution())
| 654 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowerCAmelCase ( lowercase_ ):
    """Tool that produces a binary segmentation mask for an image from a text
    label, backed by the CLIPSeg model.

    NOTE(review): the original block was mangled — all class attributes shared
    the name `_lowercase` (each clobbering the previous), `__init__` declared
    duplicate parameter names (a SyntaxError), and the three pipeline methods
    all shared one name. Attribute and method names are restored to the
    PipelineTool contract (description/default_checkpoint/name/model_class/
    inputs/outputs and encode/forward/decode).
    """

    description = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    default_checkpoint = '''CIDAS/clipseg-rd64-refined'''
    name = '''image_segmenter'''
    model_class = CLIPSegForImageSegmentation
    inputs = ['''image''', '''text''']
    outputs = ['''image''']

    def __init__( self : Optional[int] , *args : Optional[Any] , **kwargs : List[Any]):
        '''Check that vision dependencies (PIL) are present, then defer to the base tool.'''
        requires_backends(self , ["""vision"""])
        super().__init__(*args , **kwargs)

    def encode( self : str , image : "Image" , label : str):
        '''Preprocess (image, label) into model-ready tensors.'''
        # TODO(review): the padding value was mangled away in the original;
        # "max_length" matches the upstream image-segmentation tool — confirm.
        return self.pre_processor(text=[label] , images=[image] , padding="max_length" , return_tensors="""pt""")

    def forward( self : Any , inputs : Optional[Any]):
        '''Run CLIPSeg without gradients and return the raw logits.'''
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode( self : Optional[int] , outputs : Union[str, Any]):
        '''Threshold logits at 0 into a binary {0, 255} PIL mask.'''
        # NOTE(review): the original's two mask assignments were mangled into
        # bare rebindings and it returned an undefined `array`; restored.
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 2_5_5).astype(np.uinta))
| 654 | 1 |
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Extended Euclidean algorithm: return (x, y) with a*x + b*y == gcd(a, b).

    NOTE(review): the four functions in this chunk were all mangled to one name
    while their bodies called the original names (NameError as written); the
    referenced names and tuple-unpack targets are restored.
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Smallest non-negative x with x ≡ r1 (mod n1) and x ≡ r2 (mod n2),
    for coprime moduli, built from Bézout coefficients."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Multiplicative inverse of ``a`` modulo ``n`` (a and n assumed coprime)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as chinese_remainder_theorem, derived via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


# Backward-compatible alias for the mangled public name (previously the last of
# the colliding definitions, i.e. chinese_remainder_theorem2).
_UpperCAmelCase = chinese_remainder_theorem2


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="""chinese_remainder_theorem""", verbose=True)
    testmod(name="""chinese_remainder_theorem2""", verbose=True)
    testmod(name="""invert_modulo""", verbose=True)
    testmod(name="""extended_euclid""", verbose=True)
| 654 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Dict=1_8 , UpperCamelCase__ : Any=3_0 , UpperCamelCase__ : List[Any]=4_0_0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=None , UpperCamelCase__ : Optional[int]=True , ):
'''simple docstring'''
snake_case__ = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = num_channels
snake_case__ = image_size
snake_case__ = min_resolution
snake_case__ = max_resolution
snake_case__ = do_resize
snake_case__ = size
snake_case__ = apply_ocr
def __magic_name__ ( self : Optional[Any]):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( lowercase_ , unittest.TestCase ):
    """Tests for LayoutLMvaImageProcessor: property presence, dict round-trips,
    batched/unbatched processing of PIL/numpy/torch inputs, and a Tesseract-OCR
    integration check against known words and boxes.

    NOTE(review): machine-mangled block — every method is named `__magic_name__`
    (later definitions shadow earlier ones) and assignment targets were
    rewritten to `snake_case__` while later lines read the original names
    (`image_processing`, `image_inputs`, `encoding`, ...). Also,
    `LayoutLMvaImageProcessingTester` is undefined in this file as written (the
    tester class above lost its name). Code left byte-identical; comments
    describe the intended behavior.
    """

    # Intended name: image_processing_class.
    _lowercase : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def __magic_name__ ( self : Optional[int]):
        '''setUp: build the shared tester fixture (intended: self.image_processor_tester).'''
        snake_case__ = LayoutLMvaImageProcessingTester(self)

    @property
    def __magic_name__ ( self : Tuple):
        '''Intended name: image_processor_dict — kwargs from the tester fixture.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def __magic_name__ ( self : List[Any]):
        '''The processor must expose do_resize, size, and apply_ocr attributes.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(UpperCamelCase__ , """do_resize"""))
        self.assertTrue(hasattr(UpperCamelCase__ , """size"""))
        self.assertTrue(hasattr(UpperCamelCase__ , """apply_ocr"""))

    def __magic_name__ ( self : Optional[int]):
        '''from_dict honors defaults and the size override.'''
        snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8})
        snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
        self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2})

    def __magic_name__ ( self : List[str]):
        '''Intentionally disabled base-class check (no-op override).'''
        pass

    def __magic_name__ ( self : List[str]):
        '''PIL inputs: unbatched and batched pixel_values shapes, plus OCR outputs.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , Image.Image)
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""")
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        self.assertIsInstance(encoding.words , UpperCamelCase__)
        self.assertIsInstance(encoding.boxes , UpperCamelCase__)
        # Test batched
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    def __magic_name__ ( self : List[Any]):
        '''numpy inputs: unbatched and batched pixel_values shapes.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , np.ndarray)
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    def __magic_name__ ( self : Dict):
        '''torch inputs: unbatched and batched pixel_values shapes.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , torch.Tensor)
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    def __magic_name__ ( self : Any):
        '''Integration: run real OCR on a DocVQA fixture and compare the exact
        words/boxes Tesseract 4.1.1 produces; then re-run with apply_ocr=False.'''
        snake_case__ = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        snake_case__ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""")
        snake_case__ = Image.open(ds[0]["""file"""]).convert("""RGB""")
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""")
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
        self.assertEqual(len(encoding.words) , len(encoding.boxes))
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        snake_case__ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", 
        """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        snake_case__ = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], 
        [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 
        8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , UpperCamelCase__)
        self.assertListEqual(encoding.boxes , UpperCamelCase__)
        # with apply_OCR = False
        snake_case__ = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__)
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""")
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
| 654 | 1 |
def _UpperCAmelCase(a: str, b: str) -> int:
    """Return the Hamming distance between two equal-length strings.

    The Hamming distance is the number of positions at which the
    corresponding characters differ.

    Raises:
        ValueError: if the two strings do not have the same length.
    """
    if len(a) != len(b):
        raise ValueError("""String lengths must match!""")
    count = 0
    # Walk both strings in lockstep and count mismatching positions.
    for char_a, char_b in zip(a, b):
        if char_a != char_b:
            count += 1
    return count
# Run the module's doctests when this file is executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 654 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _lowerCAmelCase(lowercase_):
    """Torch ``Dataset`` over pre-tokenized language-model sequences.

    Stores token-id sequences and their lengths as numpy arrays and cleans
    the data on construction: sequences longer than the model input size are
    split into chunks, very short sequences are dropped, and sequences
    dominated by unknown tokens are removed.
    """

    def __init__(self, params, data):
        """``params`` is the run configuration (special token ids,
        ``max_model_input_size``, ``mlm`` flag, ``is_master``); ``data`` is an
        iterable of token-id sequences.
        """
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity-check that every stored length matches its sequence."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than ``max_model_input_size`` into chunks,
        restoring the boundary special tokens on every chunk."""
        max_len = self.params.max_model_input_size
        too_long = self.lengths > max_len
        logger.info(F'''Splitting {sum(too_long)} too long sequences.''')

        def divide_chunks(l, n):
            # Successive n-sized chunks of l.
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
        else:
            cls_id, sep_id = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room to re-add the cls/sep tokens.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(s) for s in sub_seqs])
        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 1_1
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''')

    def remove_unknown_sequences(self):
        """Drop sequences where at least half of the tokens are unknown."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        unk_token_id = self.params.special_tok_ids["""unk_token"""]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''')

    def print_statistics(self):
        """Log basic dataset statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(F'''{len(self)} sequences''')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate ``(token_ids, length)`` pairs into padded torch tensors.

        Returns ``(tk_t, lg_t)``: a ``(bs, max_seq_len_)`` tensor of token
        ids right-padded to the longest sequence in the batch, and a ``(bs,)``
        tensor of the true lengths.
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Pad every sequence up to the longest one in the batch.
        max_seq_len_ = max(lengths)
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["""pad_token"""]
        else:
            pad_idx = self.params.special_tok_ids["""unk_token"""]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 654 | 1 |
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase(metaclass=lowercase_):
    """Placeholder that raises a helpful error when ``torch`` is missing."""
    _lowercase = ["""torch"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])


class _lowerCAmelCase(metaclass=lowercase_):
    """Placeholder that raises a helpful error when ``torch`` is missing."""
    _lowercase = ["""torch"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])


class _lowerCAmelCase(metaclass=lowercase_):
    """Placeholder that raises a helpful error when ``torch`` is missing."""
    _lowercase = ["""torch"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])


class _lowerCAmelCase(metaclass=lowercase_):
    """Placeholder that raises a helpful error when ``torch`` is missing."""
    _lowercase = ["""torch"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])


class _lowerCAmelCase(metaclass=lowercase_):
    """Placeholder that raises a helpful error when ``torch`` is missing."""
    _lowercase = ["""torch"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])


class _lowerCAmelCase(metaclass=lowercase_):
    """Placeholder that raises a helpful error when ``torch`` is missing."""
    _lowercase = ["""torch"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])


class _lowerCAmelCase(metaclass=lowercase_):
    """Placeholder that raises a helpful error when ``torch`` is missing."""
    _lowercase = ["""torch"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])


class _lowerCAmelCase(metaclass=lowercase_):
    """Placeholder that raises a helpful error when ``torch`` is missing."""
    _lowercase = ["""torch"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])


class _lowerCAmelCase(metaclass=lowercase_):
    """Placeholder that raises a helpful error when ``torch`` is missing."""
    _lowercase = ["""torch"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])


class _lowerCAmelCase(metaclass=lowercase_):
    """Placeholder that raises a helpful error when ``torch`` is missing."""
    _lowercase = ["""torch"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])


class _lowerCAmelCase(metaclass=lowercase_):
    """Placeholder that raises a helpful error when ``torch`` is missing."""
    _lowercase = ["""torch"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])
    @classmethod
    def __magic_name__(cls, *args, **kwargs):
        requires_backends(cls, ["""torch"""])
def _UpperCAmelCase(*a, **kwargs):
    """Stub for a torch-only function; raises if ``torch`` is unavailable."""
    requires_backends(a, ["""torch"""])


def _UpperCAmelCase(*a, **kwargs):
    """Stub for a torch-only function; raises if ``torch`` is unavailable."""
    requires_backends(a, ["""torch"""])


def _UpperCAmelCase(*a, **kwargs):
    """Stub for a torch-only function; raises if ``torch`` is unavailable."""
    requires_backends(a, ["""torch"""])


def _UpperCAmelCase(*a, **kwargs):
    """Stub for a torch-only function; raises if ``torch`` is unavailable."""
    requires_backends(a, ["""torch"""])


def _UpperCAmelCase(*a, **kwargs):
    """Stub for a torch-only function; raises if ``torch`` is unavailable."""
    requires_backends(a, ["""torch"""])


def _UpperCAmelCase(*a, **kwargs):
    """Stub for a torch-only function; raises if ``torch`` is unavailable."""
    requires_backends(a, ["""torch"""])


def _UpperCAmelCase(*a, **kwargs):
    """Stub for a torch-only function; raises if ``torch`` is unavailable."""
    requires_backends(a, ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : str = ['''torch''']
def __init__( self : List[Any] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Union[str, Any]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Union[str, Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : str):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : int , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : str = ['''torch''']
def __init__( self : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Optional[Any]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Any , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[int]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Optional[int] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Dict = ['''torch''']
def __init__( self : List[str] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Optional[int]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Any , *UpperCamelCase__ : int , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Optional[int] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : str):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Any = ['''torch''']
def __init__( self : str , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[Any]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : str , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : str , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Optional[Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Any = ['''torch''']
def __init__( self : Optional[int] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Union[str, Any]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : List[str] , *UpperCamelCase__ : str , **UpperCamelCase__ : Dict):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Optional[int] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Union[str, Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Union[str, Any] = ['''torch''']
def __init__( self : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Any , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Dict , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Dict):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Optional[Any] = ['''torch''']
def __init__( self : List[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : List[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : List[str]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Any = ['''torch''']
def __init__( self : List[Any] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Dict , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : str):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Any , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Tuple = ['''torch''']
def __init__( self : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : int , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Any , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[int]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Optional[int] = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Tuple , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Union[str, Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Any , *UpperCamelCase__ : int , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Optional[int] = ['''torch''']
def __init__( self : str , *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : str , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Dict):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : List[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : List[Any] = ['''torch''']
def __init__( self : Dict , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : str):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Union[str, Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Dict):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Dict , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Optional[Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : List[Any] = ['''torch''']
def __init__( self : int , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Optional[int]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Tuple , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Union[str, Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[int]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Tuple = ['''torch''']
def __init__( self : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Optional[Any] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : int , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Union[str, Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Union[str, Any] = ['''torch''']
def __init__( self : str , *UpperCamelCase__ : int , **UpperCamelCase__ : Dict):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : int , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Tuple , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Optional[int] = ['''torch''']
def __init__( self : List[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : List[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : int):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Any , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : List[str] = ['''torch''']
def __init__( self : int , *UpperCamelCase__ : Any , **UpperCamelCase__ : Tuple):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : str):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Tuple , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Union[str, Any] = ['''torch''']
def __init__( self : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Optional[Any]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Optional[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : str):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Any , *UpperCamelCase__ : str , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Optional[int] = ['''torch''']
def __init__( self : Dict , *UpperCamelCase__ : Any , **UpperCamelCase__ : Dict):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : List[Any] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Union[str, Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : List[str] , *UpperCamelCase__ : str , **UpperCamelCase__ : List[str]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : str = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : Union[str, Any]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : List[str] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : List[str] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : List[str] = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Union[str, Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : int):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : List[str] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Union[str, Any] = ['''torch''']
def __init__( self : Optional[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : int):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Optional[Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[int]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : List[Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Optional[Any] = ['''torch''']
def __init__( self : List[Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Dict):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : List[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Optional[Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : int , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Tuple):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : List[Any] = ['''torch''']
def __init__( self : Dict , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : int):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Any , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : str):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : int , *UpperCamelCase__ : str , **UpperCamelCase__ : Union[str, Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : str = ['''torch''']
def __init__( self : str , *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[Any]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Any , *UpperCamelCase__ : int , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Optional[int] = ['''torch''']
def __init__( self : Optional[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Tuple , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Optional[int]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : int , *UpperCamelCase__ : int , **UpperCamelCase__ : Tuple):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : int = ['''torch''']
def __init__( self : Dict , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : List[str]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : str , *UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[int]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Tuple = ['''torch''']
def __init__( self : Tuple , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Optional[Any]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Optional[int] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Tuple , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Union[str, Any] = ['''torch''']
def __init__( self : Tuple , *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Optional[int] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : int , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Tuple):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Optional[int] = ['''torch''']
def __init__( self : List[str] , *UpperCamelCase__ : Any , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Dict , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : List[str] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Any):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
"""simple docstring"""
_lowercase : Optional[Any] = ['''torch''']
def __init__( self : Tuple , *UpperCamelCase__ : int , **UpperCamelCase__ : str):
'''simple docstring'''
requires_backends(self , ["""torch"""])
@classmethod
def __magic_name__ ( cls : List[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Tuple):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
@classmethod
def __magic_name__ ( cls : Optional[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : int):
'''simple docstring'''
requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
    """Import-guard placeholder: any use fails fast if PyTorch is not installed."""
    _lowercase : List[str] = ['''torch''']
    def __init__( self : str , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Dict):
        '''Raise if the torch backend is unavailable.'''
        requires_backends(self , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Union[str, Any]):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : Optional[int] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Any):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
    """Import-guard placeholder: any use fails fast if PyTorch is not installed."""
    _lowercase : int = ['''torch''']
    def __init__( self : int , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Union[str, Any]):
        '''Raise if the torch backend is unavailable.'''
        requires_backends(self , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : int , *UpperCamelCase__ : str , **UpperCamelCase__ : Union[str, Any]):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : Optional[int] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Tuple):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
    """Import-guard placeholder: any use fails fast if PyTorch is not installed."""
    _lowercase : Tuple = ['''torch''']
    def __init__( self : List[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : str):
        '''Raise if the torch backend is unavailable.'''
        requires_backends(self , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[str]):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : Any , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
    """Import-guard placeholder: any use fails fast if PyTorch is not installed."""
    _lowercase : Any = ['''torch''']
    def __init__( self : str , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Dict):
        '''Raise if the torch backend is unavailable.'''
        requires_backends(self , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : int):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : str , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : List[str]):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
    """Import-guard placeholder: any use fails fast if PyTorch is not installed."""
    _lowercase : Optional[int] = ['''torch''']
    def __init__( self : Tuple , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any):
        '''Raise if the torch backend is unavailable.'''
        requires_backends(self , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : Optional[Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[int]):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : List[str] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any]):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
    """Import-guard placeholder: any use fails fast if PyTorch is not installed."""
    _lowercase : str = ['''torch''']
    def __init__( self : Tuple , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Tuple):
        '''Raise if the torch backend is unavailable.'''
        requires_backends(self , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : Dict , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[str]):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : Optional[int] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[str]):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
    """Import-guard placeholder: any use fails fast if PyTorch is not installed."""
    _lowercase : Optional[int] = ['''torch''']
    def __init__( self : Optional[int] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[str]):
        '''Raise if the torch backend is unavailable.'''
        requires_backends(self , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Optional[int]):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : int , *UpperCamelCase__ : Any , **UpperCamelCase__ : List[Any]):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
class _lowerCAmelCase ( metaclass=lowercase_ ):
    """Import-guard placeholder: any use fails fast if PyTorch is not installed."""
    _lowercase : Any = ['''torch''']
    def __init__( self : Any , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Dict):
        '''Raise if the torch backend is unavailable.'''
        requires_backends(self , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : str , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Tuple):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
    @classmethod
    def __magic_name__ ( cls : str , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Tuple):
        '''Classmethod stub guarded by the torch backend check.'''
        requires_backends(cls , ["""torch"""])
| 654 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def _UpperCAmelCase ( a : str ):
if "model" in orig_key:
snake_case__ = orig_key.replace("""model.""" , """""" )
if "norm1" in orig_key:
snake_case__ = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
if "norm2" in orig_key:
snake_case__ = orig_key.replace("""norm2""" , """output.LayerNorm""" )
if "norm" in orig_key:
snake_case__ = orig_key.replace("""norm""" , """LayerNorm""" )
if "transformer" in orig_key:
snake_case__ = orig_key.split(""".""" )[0].split("""_""" )[-1]
snake_case__ = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' )
if "mha.attn" in orig_key:
snake_case__ = orig_key.replace("""mha.attn""" , """attention.self""" )
if "mha" in orig_key:
snake_case__ = orig_key.replace("""mha""" , """attention""" )
if "W_q" in orig_key:
snake_case__ = orig_key.replace("""W_q""" , """self.query""" )
if "W_k" in orig_key:
snake_case__ = orig_key.replace("""W_k""" , """self.key""" )
if "W_v" in orig_key:
snake_case__ = orig_key.replace("""W_v""" , """self.value""" )
if "ff1" in orig_key:
snake_case__ = orig_key.replace("""ff1""" , """intermediate.dense""" )
if "ff2" in orig_key:
snake_case__ = orig_key.replace("""ff2""" , """output.dense""" )
if "ff" in orig_key:
snake_case__ = orig_key.replace("""ff""" , """output.dense""" )
if "mlm_class" in orig_key:
snake_case__ = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
if "mlm" in orig_key:
snake_case__ = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
if "cls" not in orig_key:
snake_case__ = """yoso.""" + orig_key
return orig_key
def _UpperCAmelCase ( a : Tuple , a : Dict ):
for key in orig_state_dict.copy().keys():
snake_case__ = orig_state_dict.pop(a )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
snake_case__ = val
snake_case__ = orig_state_dict["""cls.predictions.decoder.bias"""]
snake_case__ = torch.arange(a ).expand((1, -1) ) + 2
return orig_state_dict
def _UpperCAmelCase ( checkpoint_path , yoso_config_file , pytorch_dump_path ):
    """Load an original YOSO checkpoint, convert it and save an HF model.

    Fixes the obfuscated signature, which declared three parameters all named
    ``a`` (a SyntaxError) so the three paths could not be distinguished.

    Args:
        checkpoint_path: path to the original ``.bin``/``.pt`` checkpoint
            (must contain a ``model_state_dict`` entry).
        yoso_config_file: JSON file describing the YosoConfig.
        pytorch_dump_path: output directory for ``save_pretrained``.
    """
    orig_state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model_state_dict"""]
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )
    # NOTE(review): `convert_checkpoint_helper` is the sibling converter above;
    # the obfuscated module does not define that name — confirm the mapping.
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    # load_state_dict's report is printed so missing/unexpected keys are visible.
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
    # CLI entry point: convert an original YOSO checkpoint to HF format.
    a__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for YOSO model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    # NOTE(review): `parser` / `convert_yoso_checkpoint` are the
    # pre-obfuscation names of the parser assigned to `a__` above and the
    # converter function defined earlier — verify the name mapping.
    a__ = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 654 | 1 |
import requests
from bsa import BeautifulSoup
def _UpperCAmelCase ( a : str = "AAPL" ):
    """Scrape the current quote for ``a`` (a ticker symbol) from Yahoo Finance.

    Fixes the obfuscated version, where the CSS class string was assigned to a
    throwaway variable while the lookup read an undefined name ``class_``
    (NameError at runtime).

    Returns the price text of the first matching span; raises AttributeError
    if the page layout changed and the div/span is not found.
    """
    url = F'''https://in.finance.yahoo.com/quote/{a}?s={a}'''
    soup = BeautifulSoup(requests.get(url ).text , """html.parser""" )
    # CSS class of the quote container on Yahoo's (mobile) quote page.
    class_ = """My(6px) Pos(r) smartphone_Mt(6px)"""
    return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
    # Smoke test: print the live price for a handful of large-cap tickers.
    # NOTE(review): `stock_price` is presumably the function defined above
    # (its name was mangled by obfuscation) — confirm before running.
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 654 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( lowercase_ ):
    """Read-only fsspec filesystem exposing a single compressed file as one
    uncompressed virtual file (used as e.g. ``gzip://file.txt::http://...``)."""
    _lowercase : Optional[int] = ''''''
    _lowercase : str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    _lowercase : str = None  # compression type in fsspec. ex: "gzip"
    _lowercase : str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__( self : List[Any] , UpperCamelCase__ : str = "" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , **UpperCamelCase__ : List[Any]):
        '''Lazily open the (possibly remote) compressed file via fsspec.

        Decompression is delegated to fsspec through ``self.compression``.
        '''
        super().__init__(self , **UpperCamelCase__)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        snake_case__ = fsspec.open(
            UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {}), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        # NOTE(review): the obfuscation collapsed distinct attribute names to
        # ``snake_case__``; ``target_options`` above is presumably one of the
        # positional parameters — verify against upstream `datasets`.
        # The part before "::" is the name inside the (chained) URL.
        snake_case__ = os.path.basename(self.file.path.split("""::""")[0])
        # Uncompressed name = compressed name minus its final extension.
        snake_case__ = (
            self.compressed_name[: self.compressed_name.rindex(""".""")]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        snake_case__ = None
    @classmethod
    def __magic_name__ ( cls : Union[str, Any] , UpperCamelCase__ : List[Any]):
        '''Strip the protocol prefix and any leading slash from a path.'''
        return super()._strip_protocol(UpperCamelCase__).lstrip("""/""")
    def __magic_name__ ( self : Dict):
        '''Populate the single-entry directory cache on first use.'''
        if self.dir_cache is None:
            snake_case__ = {**self.file.fs.info(self.file.path), """name""": self.uncompressed_name}
            # NOTE(review): `f` presumably refers to the info dict built on the
            # previous line (name collapsed by obfuscation) — verify upstream.
            snake_case__ = {f["""name"""]: f}
    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : str):
        '''Return the full decompressed content of the archived file as bytes.'''
        return self.file.open().read()
    def __magic_name__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : Optional[Any] , ):
        '''Open the (only) file in the archive; binary read-only is enforced.'''
        snake_case__ = self._strip_protocol(UpperCamelCase__)
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''')
        return self.file.open()
class _lowerCAmelCase ( lowercase_ ):
    """Bzip2 variant: protocol/compression "bz2", strips the ".bz2" extension."""
    _lowercase : Dict = '''bz2'''
    _lowercase : Dict = '''bz2'''
    _lowercase : Optional[int] = '''.bz2'''
class _lowerCAmelCase ( lowercase_ ):
    """Gzip variant: protocol/compression "gzip", strips the ".gz" extension."""
    _lowercase : Dict = '''gzip'''
    _lowercase : List[str] = '''gzip'''
    _lowercase : Any = '''.gz'''
class _lowerCAmelCase ( lowercase_ ):
    """LZ4 variant: protocol/compression "lz4", strips the ".lz4" extension."""
    _lowercase : str = '''lz4'''
    _lowercase : List[Any] = '''lz4'''
    _lowercase : Dict = '''.lz4'''
class _lowerCAmelCase ( lowercase_ ):
    """XZ variant: protocol/compression "xz", strips the ".xz" extension."""
    _lowercase : Optional[int] = '''xz'''
    _lowercase : Union[str, Any] = '''xz'''
    _lowercase : Optional[int] = '''.xz'''
class _lowerCAmelCase ( lowercase_ ):
    """Zstandard variant; additionally wraps the decompression reader so that
    fsspec can assign its ``close`` attribute (read-only on the raw reader)."""
    _lowercase : Optional[int] = '''zstd'''
    _lowercase : Tuple = '''zstd'''
    _lowercase : Union[str, Any] = '''.zst'''
    def __init__( self : str , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , UpperCamelCase__ : int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__ : int , ):
        '''Open like the base class, then patch ``__enter__`` (see below).'''
        super().__init__(
            fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        snake_case__ = self.file.__enter__
        class _lowerCAmelCase :
            """Proxy forwarding everything to the wrapped file object while
            keeping its own (assignable) attribute namespace."""
            def __init__( self : Tuple , UpperCamelCase__ : str):
                '''Remember the file object to delegate to.'''
                snake_case__ = file_
            def __enter__( self : List[str]):
                '''Enter the underlying file's context, return the proxy.'''
                self._file.__enter__()
                return self
            def __exit__( self : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any]):
                '''Delegate context exit (and closing) to the wrapped file.'''
                self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__)
            def __iter__( self : Any):
                '''Iterate over the wrapped file.'''
                return iter(self._file)
            def __magic_name__ ( self : List[str]):
                '''Advance the wrapped iterator (obfuscated ``__next__``).'''
                return next(self._file)
            def __getattr__( self : Any , UpperCamelCase__ : int):
                '''Forward any other attribute access to the wrapped file.'''
                return getattr(self._file , UpperCamelCase__)
        def fixed_enter(*UpperCamelCase__ : int , **UpperCamelCase__ : int):
            # Re-wrap whatever the original __enter__ returns.
            return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__))
        # NOTE(review): `WrappedFile` / `_enter` are the pre-obfuscation names
        # of the local class and saved __enter__ above — verify upstream.
        snake_case__ = fixed_enter
| 654 | 1 |
def _UpperCAmelCase ( a : Tuple , a : Tuple , a : Optional[Any] , a : Tuple , a : Optional[Any] , a : Union[str, Any] ):
if index == r:
for j in range(a ):
print(data[j] , end=""" """ )
print(""" """ )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
snake_case__ = arr[i]
combination_util(a , a , a , index + 1 , a , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(a , a , a , a , a , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def _UpperCAmelCase ( arr : list , n : int , r : int ):
    """Print all size-``r`` combinations of ``arr[:n]``.

    Fixes the obfuscated signature, which declared three parameters all named
    ``a`` (a SyntaxError). Allocates the scratch buffer and delegates to the
    recursive printer.
    """
    # A temporary array to store all combination one by one
    data = [0] * r
    # NOTE(review): `combination_util` is the recursive printer defined above;
    # the obfuscated module does not actually define that name — confirm.
    combination_util(arr , n , r , 0 , data , 0 )
if __name__ == "__main__":
    # Driver code to check the function above
    a__ = [1_0, 2_0, 3_0, 4_0, 5_0]
    # NOTE(review): `print_combination` / `arr` are the pre-obfuscation names
    # of the wrapper above and the list assigned to `a__` — verify mapping.
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 654 |
def _UpperCAmelCase ( a : int ):
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 654 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _UpperCAmelCase ( a : Union[str, Any] , a : Optional[int] ):
    """Shared assertion helper: the dataset has 4 rows, the 3 expected columns
    and per-column dtypes matching ``expected_features``.

    NOTE(review): the obfuscation collapsed both parameter names to ``a`` (a
    SyntaxError as written); originally ``(dataset, expected_features)``.
    """
    assert isinstance(a , a )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _UpperCAmelCase ( a : str , a : str , a : str ):
    """Reading parquet honors keep_in_memory: Arrow memory only grows when the
    table is pinned in RAM (params originally (keep_in_memory, parquet_path,
    tmp_path) — names mangled to duplicate ``a``s by obfuscation)."""
    snake_case__ = tmp_path / """cache"""
    snake_case__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        snake_case__ = ParquetDatasetReader(a , cache_dir=a , keep_in_memory=a ).read()
    _check_parquet_dataset(a , a )
@pytest.mark.parametrize(
    """features""" , [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ] , )
def _UpperCAmelCase ( a : Tuple , a : List[str] , a : Tuple ):
    """An explicit ``features`` schema passed to the reader overrides the
    schema inferred from the parquet file (param names mangled)."""
    snake_case__ = tmp_path / """cache"""
    snake_case__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    snake_case__ = features.copy() if features else default_expected_features
    snake_case__ = (
        Features({feature: Value(a ) for feature, dtype in features.items()} ) if features is not None else None
    )
    snake_case__ = ParquetDatasetReader(a , features=a , cache_dir=a ).read()
    _check_parquet_dataset(a , a )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _UpperCAmelCase ( a : Any , a : Optional[int] , a : Any ):
    """The ``split`` given to the reader becomes the dataset's split, with
    "train" as the default (param names mangled)."""
    snake_case__ = tmp_path / """cache"""
    snake_case__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    snake_case__ = ParquetDatasetReader(a , cache_dir=a , split=a ).read()
    _check_parquet_dataset(a , a )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def _UpperCAmelCase ( a : Any , a : Dict , a : List[str] ):
    """The reader accepts either a single path or a list of paths (param
    names mangled)."""
    if issubclass(a , a ):
        snake_case__ = parquet_path
    elif issubclass(a , a ):
        snake_case__ = [parquet_path]
    snake_case__ = tmp_path / """cache"""
    snake_case__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    snake_case__ = ParquetDatasetReader(a , cache_dir=a ).read()
    _check_parquet_dataset(a , a )
def _UpperCAmelCase ( a : Optional[Any] , a : Optional[int] , a : Tuple=("train",) ):
    """Assertion helper for DatasetDict results: validates row/column counts
    and dtypes for every requested split (param names mangled)."""
    assert isinstance(a , a )
    for split in splits:
        snake_case__ = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _UpperCAmelCase ( a : List[Any] , a : Optional[int] , a : List[Any] ):
    """DatasetDict variant of the keep_in_memory test (param names mangled)."""
    snake_case__ = tmp_path / """cache"""
    snake_case__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        snake_case__ = ParquetDatasetReader(
            {"""train""": parquet_path} , cache_dir=a , keep_in_memory=a ).read()
    _check_parquet_datasetdict(a , a )
@pytest.mark.parametrize(
    """features""" , [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ] , )
def _UpperCAmelCase ( a : Union[str, Any] , a : Union[str, Any] , a : Any ):
    """DatasetDict variant of the explicit-features override test (param
    names mangled)."""
    snake_case__ = tmp_path / """cache"""
    snake_case__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    snake_case__ = features.copy() if features else default_expected_features
    snake_case__ = (
        Features({feature: Value(a ) for feature, dtype in features.items()} ) if features is not None else None
    )
    snake_case__ = ParquetDatasetReader({"""train""": parquet_path} , features=a , cache_dir=a ).read()
    _check_parquet_datasetdict(a , a )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _UpperCAmelCase ( a : str , a : Tuple , a : Dict ):
    """Split handling for DatasetDict: an explicit split maps one file,
    otherwise both train and test are read (param names mangled)."""
    if split:
        snake_case__ = {split: parquet_path}
    else:
        snake_case__ = """train"""
        snake_case__ = {"""train""": parquet_path, """test""": parquet_path}
    snake_case__ = tmp_path / """cache"""
    snake_case__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    snake_case__ = ParquetDatasetReader(a , cache_dir=a ).read()
    _check_parquet_datasetdict(a , a , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def _UpperCAmelCase ( a : Dict , a : Tuple ):
    """Round trip: the table written by ParquetDatasetWriter must equal the
    source dataset's Arrow table (param names mangled)."""
    snake_case__ = ParquetDatasetWriter(a , tmp_path / """foo.parquet""" )
    assert writer.write() > 0
    snake_case__ = pq.ParquetFile(tmp_path / """foo.parquet""" )
    snake_case__ = pf.read()
    assert dataset.data.table == output_table
def _UpperCAmelCase ( a : List[str] , a : int ):
    """An Image feature survives a parquet write/read round trip, both eager
    and streaming (param names mangled)."""
    snake_case__ = str(shared_datadir / """test_image_rgb.jpg""" )
    snake_case__ = {"""image""": [image_path]}
    snake_case__ = Features({"""image""": Image()} )
    snake_case__ = Dataset.from_dict(a , features=a )
    snake_case__ = ParquetDatasetWriter(a , tmp_path / """foo.parquet""" )
    assert writer.write() > 0
    snake_case__ = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
    assert dataset.features == reloaded_dataset.features
    snake_case__ = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=a ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    """feature, expected""" , [
        (Features({"""foo""": Value("""int32""" )} ), None),
        (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ] , )
def _UpperCAmelCase ( a : int , a : Optional[Any] ):
    """Writer row-group size is reduced for media-heavy (image/audio)
    features and unset otherwise (param names mangled)."""
    assert get_writer_batch_size(a ) == expected
| 654 |
class _lowerCAmelCase :
    """Binary-indexed (Fenwick-style) structure supporting point updates and
    maximum queries over half-open ranges ``[left, right)``."""
    def __init__( self : List[Any] , UpperCamelCase__ : int):
        '''Allocate the aggregate tree and the raw-value array.'''
        snake_case__ = size
        # Aggregated maxima over the implicit Fenwick intervals.
        snake_case__ = [0] * size
        # Raw per-index values; queries fall back to these for single cells.
        snake_case__ = [0] * size
    @staticmethod
    def __magic_name__ ( UpperCamelCase__ : int):
        '''Next index whose interval covers this one (sets the lowest unset bit).'''
        return index | (index + 1)
    @staticmethod
    def __magic_name__ ( UpperCamelCase__ : int):
        '''Index just before the start of the interval ending at ``index``.'''
        return (index & (index + 1)) - 1
    def __magic_name__ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int):
        '''Set arr[index] = value and refresh every tree interval covering it.'''
        snake_case__ = value
        while index < self.size:
            snake_case__ = self.get_prev(UpperCamelCase__) + 1
            if current_left_border == index:
                # Interval is the single cell: its max is the new value.
                snake_case__ = value
            else:
                # NOTE(review): the obfuscation collapsed the arguments here;
                # the intended expression is presumably
                # max(self.tree[index], value) — verify against upstream.
                snake_case__ = max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = self.get_next(UpperCamelCase__)
    def __magic_name__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int):
        '''Return max over [left, right) using O(log n) interval jumps.'''
        right -= 1 # Because of right is exclusive
        snake_case__ = 0
        while left <= right:
            snake_case__ = self.get_prev(UpperCamelCase__)
            if left <= current_left:
                # Whole tree interval ending at ``right`` fits in the range.
                snake_case__ = max(UpperCamelCase__ , self.tree[right])
                snake_case__ = current_left
            else:
                # Interval would overshoot the left bound: take one raw cell.
                snake_case__ = max(UpperCamelCase__ , self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 654 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a__ = logging.get_logger(__name__)
# Canonical filename of the serialized SentencePiece model.
a__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
# Pretrained checkpoint name -> downloadable vocab file URL.
a__ = {
    """vocab_file""": {
        """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
    }
}
# Maximum model input length (in tokens) per pretrained checkpoint.
a__ = {
    """camembert-base""": 5_1_2,
}
# SentencePiece's meta symbol marking a word boundary.
a__ = """▁"""
class _lowerCAmelCase ( lowercase_ ):
    """CamemBERT tokenizer backed by a SentencePiece BPE model, with
    fairseq-style reserved special-token ids prepended to the vocabulary."""
    _lowercase : Optional[Any] = VOCAB_FILES_NAMES
    _lowercase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowercase : Optional[Any] = ['''input_ids''', '''attention_mask''']
    def __init__( self : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : Union[str, Any]="</s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Union[str, Any]="<s>" , UpperCamelCase__ : List[str]="<unk>" , UpperCamelCase__ : Union[str, Any]="<pad>" , UpperCamelCase__ : Dict="<mask>" , UpperCamelCase__ : Any=["<s>NOTUSED", "</s>NOTUSED"] , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Any , ):
        '''Load the SentencePiece model and register fairseq's reserved ids.'''
        # Wrap the mask token so the space before it is stripped on match.
        snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else mask_token
        snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
        snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(UpperCamelCase__))
        snake_case__ = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        snake_case__ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
        # All SentencePiece ids are shifted up by the number of reserved ids.
        snake_case__ = len(self.fairseq_tokens_to_ids)
        snake_case__ = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        snake_case__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __magic_name__ ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None):
        '''Build model inputs: <s> A </s> or <s> A </s></s> B </s>.'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        snake_case__ = [self.cls_token_id]
        snake_case__ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def __magic_name__ ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False):
        '''Return a 0/1 mask with 1 at special-token positions.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__)
        if token_ids_a is None:
            return [1] + ([0] * len(UpperCamelCase__)) + [1]
        return [1] + ([0] * len(UpperCamelCase__)) + [1, 1] + ([0] * len(UpperCamelCase__)) + [1]
    def __magic_name__ ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None):
        '''Token type ids: CamemBERT uses all zeros, even for pairs.'''
        snake_case__ = [self.sep_token_id]
        snake_case__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
    @property
    def __magic_name__ ( self : Union[str, Any]):
        '''Vocabulary size: SentencePiece pieces plus fairseq reserved ids.'''
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)
    def __magic_name__ ( self : str):
        '''Return the full token -> id mapping, including added tokens.'''
        snake_case__ = {self.convert_ids_to_tokens(UpperCamelCase__): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __magic_name__ ( self : int , UpperCamelCase__ : str):
        '''Tokenize a string into SentencePiece subword pieces.'''
        return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__)
    def __magic_name__ ( self : Optional[Any] , UpperCamelCase__ : List[str]):
        '''Convert a token to its id, honoring the fairseq offset.'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(UpperCamelCase__) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(UpperCamelCase__)
    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any]):
        '''Convert an id back to its token, honoring the fairseq offset.'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def __magic_name__ ( self : int , UpperCamelCase__ : str):
        '''Join tokens into a string, decoding around special tokens.'''
        snake_case__ = []
        snake_case__ = """"""
        snake_case__ = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(UpperCamelCase__) + token
                snake_case__ = True
                snake_case__ = []
            else:
                current_sub_tokens.append(UpperCamelCase__)
                snake_case__ = False
        out_string += self.sp_model.decode(UpperCamelCase__)
        return out_string.strip()
    def __getstate__( self : int):
        '''Drop the unpicklable SentencePiece processor before pickling.'''
        snake_case__ = self.__dict__.copy()
        snake_case__ = None
        return state
    def __setstate__( self : Any , UpperCamelCase__ : Any):
        '''Restore state and reload the SentencePiece model from disk.'''
        snake_case__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs"""):
            snake_case__ = {}
        snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def __magic_name__ ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None):
        '''Save the SentencePiece model file into ``save_directory``.'''
        if not os.path.isdir(UpperCamelCase__):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        # Copy the original file when available, otherwise re-serialize.
        if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase__) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , UpperCamelCase__)
        elif not os.path.isfile(self.vocab_file):
            with open(UpperCamelCase__ , """wb""") as fi:
                snake_case__ = self.sp_model.serialized_model_proto()
                fi.write(UpperCamelCase__)
        return (out_vocab_file,)
| 654 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _lowerCAmelCase :
"""simple docstring"""
_lowercase : List[str] = PegasusConfig
_lowercase : Union[str, Any] = {}
_lowercase : Tuple = '''gelu'''
def __init__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int]=1_3 , UpperCamelCase__ : Any=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : int=9_9 , UpperCamelCase__ : Dict=3_2 , UpperCamelCase__ : str=2 , UpperCamelCase__ : int=4 , UpperCamelCase__ : Tuple=3_7 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : str=4_0 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Dict=0 , ):
'''simple docstring'''
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = eos_token_id
snake_case__ = pad_token_id
snake_case__ = bos_token_id
def __magic_name__ ( self : Optional[Any]):
'''simple docstring'''
snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1)
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
snake_case__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case__ = prepare_pegasus_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
return config, inputs_dict
def __magic_name__ ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]):
'''Check that decoding with cached past_key_values matches a full forward pass.

Strategy: run the decoder once with ``use_cache`` on, append 3 new tokens, then
compare (a) a full forward over the concatenated sequence with (b) a forward
over only the new tokens plus the cached past, on a random output slice.

NOTE(review): machine-mangled — every assignment rebinds ``snake_case__`` while
later lines read the intended names (``input_ids``, ``attention_mask``,
``output_from_past`` ...), and ``tf.inta`` is presumably ``tf.int8``. Confirm
against upstream ``TFPegasusModelTester.check_decoder_model_past_large_inputs``.
'''
snake_case__ = TFPegasusModel(config=UpperCamelCase__).get_decoder()
snake_case__ = inputs_dict["""input_ids"""]
# Use a batch of one to keep the past-cache comparison cheap.
snake_case__ = input_ids[:1, :]
snake_case__ = inputs_dict["""attention_mask"""][:1, :]
snake_case__ = inputs_dict["""head_mask"""]
snake_case__ = 1
# first forward pass
snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , use_cache=UpperCamelCase__)
snake_case__ , snake_case__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size)
snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
# append to next input_ids and
snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1)
snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1)
snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__)[0]
snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1]))
snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
snake_case__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3)
def _UpperCAmelCase(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Assemble the full inputs dict for TFPegasus tests, deriving any mask not given.

    Fixes the original's duplicate parameter names (all were ``a``, a SyntaxError)
    and the mangled ``snake_case__`` assignment targets; the intended names are
    recovered from the body's own reads and from the returned dict keys.
    ``tf.inta`` is restored to ``tf.int8`` (the mask dtype used upstream).
    """
    if attention_mask is None:
        # Attend to every non-padding encoder token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # The first decoder position (decoder_start_token) is always attended to,
        # even when it equals the pad token id.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        # Keep every attention head active by default.
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


# The model tester above calls this helper by its upstream name (see the
# `prepare_pegasus_inputs_dict(...)` call); expose it under that name too.
prepare_pegasus_inputs_dict = _UpperCAmelCase
@require_tf
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
"""Common TF test suite wiring for Pegasus (model-tester + config-tester mixins).

NOTE(review): machine-mangled — upstream this is ``TFPegasusModelTest`` where the
``_lowercase`` attributes are ``all_model_classes`` / ``all_generative_model_classes``
/ ``pipeline_model_mapping`` / ``is_encoder_decoder`` / ``test_pruning`` /
``test_onnx``, and ``lowercase_`` bases are the shared tester mixins. Confirm
before repairing names.
"""
# All TF model classes exercised by the common tests (empty tuple when TF is absent).
_lowercase : int = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
# Generative subset used by the generation tests.
_lowercase : List[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
# Pipeline-task -> model-class mapping for the pipeline tests.
_lowercase : List[Any] = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
# Feature flags consumed by the shared mixins (encoder-decoder yes; pruning/onnx no).
_lowercase : Optional[int] = True
_lowercase : Dict = False
_lowercase : Any = False
def __magic_name__ ( self : str):
'''Create the shared model tester and config tester fixtures.'''
snake_case__ = TFPegasusModelTester(self)
snake_case__ = ConfigTester(self , config_class=UpperCamelCase__)
def __magic_name__ ( self : List[Any]):
'''Run the generic PegasusConfig sanity checks.'''
self.config_tester.run_common_tests()
def __magic_name__ ( self : Optional[int]):
'''Exercise the decoder past-key-values (cache) path on large inputs.'''
snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__)
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""Slow integration test: summarize two news articles with google/pegasus-xsum
and compare against pinned reference summaries.

NOTE(review): machine-mangled — assignment targets were rewritten to
``snake_case__`` while later lines read the intended names (``generated_words``,
``model_inputs``, ``generated_ids``), and ``TFAutoModelForSeqaSeqLM`` is
presumably ``TFAutoModelForSeq2SeqLM``. Confirm against upstream before repair.
"""
# Source articles fed to the summarizer.
_lowercase : List[str] = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
# Expected model outputs for the two articles above.
_lowercase : str = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
# Checkpoint under test.
_lowercase : int = '''google/pegasus-xsum'''
@cached_property
def __magic_name__ ( self : Dict):
'''Tokenizer for the pinned checkpoint (built once, then cached).'''
return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
def __magic_name__ ( self : int):
'''TF seq2seq model for the pinned checkpoint (built once, then cached).'''
snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
def __magic_name__ ( self : Dict , **UpperCamelCase__ : List[Any]):
'''Generate summaries for the source batch and assert exact equality with the references.'''
snake_case__ = self.translate_src_text(**UpperCamelCase__)
assert self.expected_text == generated_words
def __magic_name__ ( self : str , **UpperCamelCase__ : List[Any]):
'''Tokenize the sources, generate with 2-beam search + cache, and decode to strings.'''
snake_case__ = self.tokenizer(self.src_text , **UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""tf""")
snake_case__ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase__ , )
snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase__)
return generated_words
@slow
def __magic_name__ ( self : List[str]):
'''End-to-end batch generation check (marked slow: downloads the checkpoint).'''
self._assert_generated_batch_equal_expected()
# | 654 | 1 |  (dataset-row separator — commented out so it no longer breaks parsing)
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
# Module-level logger; the tokenizer class below reads it in save_vocabulary().
logger = logging.get_logger(__name__)

# On-disk names of the three vocabulary files. The original bound this and the
# next two constants all to the same throwaway name ``a__`` (each clobbering
# the previous), while the tokenizer class references VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_LYRIC_TOKENS_SIZES — a NameError.
VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

# Download URLs per pretrained checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

# Maximum number of lyric tokens per checkpoint.
PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}
class _lowerCAmelCase ( lowercase_ ):
"""Jukebox tokenizer: encodes an (artist, genres, lyrics) triple using three
independent JSON vocabularies (artists, genres, lyric characters), one per
model prior/version.

NOTE(review): this block is machine-mangled — every method is named
``__magic_name__`` (later defs shadow earlier ones), parameter lists repeat
``UpperCamelCase__`` (duplicate parameter names are a SyntaxError), and most
assignment targets were rewritten to ``snake_case__`` while later lines read
the original variable names. Comments describe the evident intent; confirm
against the upstream ``JukeboxTokenizer`` before relying on them.
"""
# Class-level metadata consumed by the PreTrainedTokenizer machinery.
_lowercase : str = VOCAB_FILES_NAMES
_lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : str = PRETRAINED_LYRIC_TOKENS_SIZES
_lowercase : Any = ['''input_ids''', '''attention_mask''']
# NOTE(review): the mutable default ``["v3", "v2", "v2"]`` (one version string per
# prior) is shared across calls — upstream accepts it but never mutates it.
def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=["v3", "v2", "v2"] , UpperCamelCase__ : List[str]=5_1_2 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : List[Any]="<|endoftext|>" , **UpperCamelCase__ : List[Any] , ):
'''Load the artists/genres/lyrics JSON vocabularies and build the reverse (decode) maps.'''
snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else unk_token
super().__init__(
unk_token=UpperCamelCase__ , n_genres=UpperCamelCase__ , version=UpperCamelCase__ , max_n_lyric_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
snake_case__ = version
snake_case__ = max_n_lyric_tokens
snake_case__ = n_genres
with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
snake_case__ = json.load(UpperCamelCase__)
with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
snake_case__ = json.load(UpperCamelCase__)
with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
snake_case__ = json.load(UpperCamelCase__)
# Any character matching this pattern is out-of-vocabulary for the lyrics stream.
snake_case__ = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder) == 7_9:
snake_case__ = oov.replace(R"""\-'""" , R"""\-+'""")
snake_case__ = regex.compile(UpperCamelCase__)
# Reverse (id -> token) maps used by the decode path.
snake_case__ = {v: k for k, v in self.artists_encoder.items()}
snake_case__ = {v: k for k, v in self.genres_encoder.items()}
snake_case__ = {v: k for k, v in self.lyrics_encoder.items()}
@property
def __magic_name__ ( self : List[str]):
'''Total vocabulary size: artists + genres + lyric characters.'''
return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
def __magic_name__ ( self : Union[str, Any]):
'''Return the combined vocabulary.

NOTE(review): ``dict()`` accepts at most ONE positional argument, so this call
raises TypeError at runtime; upstream returns a dict holding the three encoders.
'''
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder)
def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int):
'''Map artist names, per-prior genre lists and lyric characters to ids.

Unknown tokens map to 0; each genre list is right-padded with -1 up to n_genres.
Only the first prior receives real lyric ids; the other two get empty lists.
'''
snake_case__ = [self.artists_encoder.get(UpperCamelCase__ , 0) for artist in list_artists]
for genres in range(len(UpperCamelCase__)):
snake_case__ = [self.genres_encoder.get(UpperCamelCase__ , 0) for genre in list_genres[genres]]
snake_case__ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
snake_case__ = [[self.lyrics_encoder.get(UpperCamelCase__ , 0) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Optional[int]):
'''Tokenize lyrics at character granularity (a string becomes a list of chars).'''
return list(UpperCamelCase__)
def __magic_name__ ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , **UpperCamelCase__ : List[str]):
'''Normalize artist/genre/lyrics, then split the lyrics into character tokens.'''
snake_case__ , snake_case__ , snake_case__ = self.prepare_for_tokenization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
snake_case__ = self._tokenize(UpperCamelCase__)
return artist, genre, lyrics
def __magic_name__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool = False):
'''Version-aware normalization of artists, genres and lyrics before encoding.

v3 lowercases; v2 applies ``_normalize`` and appends a ``.v2`` suffix; the
out-of-vocab regex and character set also differ between v2 and v3.
'''
for idx in range(len(self.version)):
if self.version[idx] == "v3":
snake_case__ = artists[idx].lower()
snake_case__ = [genres[idx].lower()]
else:
snake_case__ = self._normalize(artists[idx]) + """.v2"""
snake_case__ = [
self._normalize(UpperCamelCase__) + """.v2""" for genre in genres[idx].split("""_""")
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""")
snake_case__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case__ = {vocab[index]: index + 1 for index in range(len(UpperCamelCase__))}
snake_case__ = 0
snake_case__ = len(UpperCamelCase__) + 1
snake_case__ = self.vocab
snake_case__ = {v: k for k, v in self.vocab.items()}
snake_case__ = """"""
else:
snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""")
snake_case__ = self._run_strip_accents(UpperCamelCase__)
snake_case__ = lyrics.replace("""\\""" , """\n""")
# Strip OOV characters; genre/artist token lists stay empty placeholders here.
snake_case__ = self.out_of_vocab.sub("""""" , UpperCamelCase__), [], []
return artists, genres, lyrics
def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : str):
'''Strip accents: NFD-normalize, then drop combining marks (category Mn).'''
snake_case__ = unicodedata.normalize("""NFD""" , UpperCamelCase__)
snake_case__ = []
for char in text:
snake_case__ = unicodedata.category(UpperCamelCase__)
if cat == "Mn":
continue
output.append(UpperCamelCase__)
return "".join(UpperCamelCase__)
def __magic_name__ ( self : List[str] , UpperCamelCase__ : str):
'''Normalize an artist/genre name: lowercase, map chars outside [a-z0-9.] to "_",
collapse runs of underscores and trim them from both ends.'''
snake_case__ = (
[chr(UpperCamelCase__) for i in range(ord("""a""") , ord("""z""") + 1)]
+ [chr(UpperCamelCase__) for i in range(ord("""A""") , ord("""Z""") + 1)]
+ [chr(UpperCamelCase__) for i in range(ord("""0""") , ord("""9""") + 1)]
+ ["""."""]
)
snake_case__ = frozenset(UpperCamelCase__)
snake_case__ = re.compile(R"""_+""")
snake_case__ = """""".join([c if c in accepted else """_""" for c in text.lower()])
snake_case__ = pattern.sub("""_""" , UpperCamelCase__).strip("""_""")
return text
def __magic_name__ ( self : List[Any] , UpperCamelCase__ : List[str]):
'''Join lyric tokens back into a single space-separated string.'''
return " ".join(UpperCamelCase__)
def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : bool = False):
'''Convert nested python lists to tensors of the requested framework
(TF / PyTorch / JAX / NumPy), optionally prepending a batch axis.'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__):
snake_case__ = TensorType(UpperCamelCase__)
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""")
import tensorflow as tf
snake_case__ = tf.constant
snake_case__ = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""")
import torch
snake_case__ = torch.tensor
snake_case__ = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""")
import jax.numpy as jnp # noqa: F811
snake_case__ = jnp.array
snake_case__ = _is_jax
else:
snake_case__ = np.asarray
snake_case__ = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case__ = [inputs]
if not is_tensor(UpperCamelCase__):
snake_case__ = as_tensor(UpperCamelCase__)
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""")
return inputs
def __call__( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any="" , UpperCamelCase__ : Dict="pt"):
'''Tokenize and encode (artist, genres, lyrics) into one input-id tensor per
prior/version; attention masks are all -inf placeholders.'''
snake_case__ = [0, 0, 0]
snake_case__ = [artist] * len(self.version)
snake_case__ = [genres] * len(self.version)
snake_case__ , snake_case__ , snake_case__ = self.tokenize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
snake_case__ , snake_case__ , snake_case__ = self._convert_token_to_id(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
snake_case__ = [-INFINITY] * len(full_tokens[-1])
snake_case__ = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCamelCase__)
for i in range(len(self.version))
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks})
def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None):
'''Write the three encoder dicts to JSON files in ``save_directory`` and
return their paths.'''
if not os.path.isdir(UpperCamelCase__):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
snake_case__ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""])
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCamelCase__))
snake_case__ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""])
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCamelCase__))
snake_case__ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""])
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCamelCase__))
return (artists_file, genres_file, lyrics_file)
def __magic_name__ ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]):
'''Decode (artist id, genre ids, lyric ids) back to their string tokens via
the reverse maps built in __init__.'''
snake_case__ = self.artists_decoder.get(UpperCamelCase__)
snake_case__ = [self.genres_decoder.get(UpperCamelCase__) for genre in genres_index]
snake_case__ = [self.lyrics_decoder.get(UpperCamelCase__) for character in lyric_index]
return artist, genres, lyrics
# | 654 |  (dataset-row separator — commented out so it no longer breaks parsing)
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
# Module-level logger; the tokenizer class below reads it in save_vocabulary().
logger = logging.get_logger(__name__)

# On-disk names of the three vocabulary files. The original bound this and the
# next two constants all to the same throwaway name ``a__`` (each clobbering
# the previous), while the tokenizer class references VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_LYRIC_TOKENS_SIZES — a NameError.
VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

# Download URLs per pretrained checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

# Maximum number of lyric tokens per checkpoint.
PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}
class _lowerCAmelCase ( lowercase_ ):
"""Jukebox tokenizer: encodes an (artist, genres, lyrics) triple using three
independent JSON vocabularies (artists, genres, lyric characters), one per
model prior/version.

NOTE(review): this block is machine-mangled — every method is named
``__magic_name__`` (later defs shadow earlier ones), parameter lists repeat
``UpperCamelCase__`` (duplicate parameter names are a SyntaxError), and most
assignment targets were rewritten to ``snake_case__`` while later lines read
the original variable names. Comments describe the evident intent; confirm
against the upstream ``JukeboxTokenizer`` before relying on them.
"""
# Class-level metadata consumed by the PreTrainedTokenizer machinery.
_lowercase : str = VOCAB_FILES_NAMES
_lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : str = PRETRAINED_LYRIC_TOKENS_SIZES
_lowercase : Any = ['''input_ids''', '''attention_mask''']
# NOTE(review): the mutable default ``["v3", "v2", "v2"]`` (one version string per
# prior) is shared across calls — upstream accepts it but never mutates it.
def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=["v3", "v2", "v2"] , UpperCamelCase__ : List[str]=5_1_2 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : List[Any]="<|endoftext|>" , **UpperCamelCase__ : List[Any] , ):
'''Load the artists/genres/lyrics JSON vocabularies and build the reverse (decode) maps.'''
snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else unk_token
super().__init__(
unk_token=UpperCamelCase__ , n_genres=UpperCamelCase__ , version=UpperCamelCase__ , max_n_lyric_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
snake_case__ = version
snake_case__ = max_n_lyric_tokens
snake_case__ = n_genres
with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
snake_case__ = json.load(UpperCamelCase__)
with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
snake_case__ = json.load(UpperCamelCase__)
with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
snake_case__ = json.load(UpperCamelCase__)
# Any character matching this pattern is out-of-vocabulary for the lyrics stream.
snake_case__ = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder) == 7_9:
snake_case__ = oov.replace(R"""\-'""" , R"""\-+'""")
snake_case__ = regex.compile(UpperCamelCase__)
# Reverse (id -> token) maps used by the decode path.
snake_case__ = {v: k for k, v in self.artists_encoder.items()}
snake_case__ = {v: k for k, v in self.genres_encoder.items()}
snake_case__ = {v: k for k, v in self.lyrics_encoder.items()}
@property
def __magic_name__ ( self : List[str]):
'''Total vocabulary size: artists + genres + lyric characters.'''
return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
def __magic_name__ ( self : Union[str, Any]):
'''Return the combined vocabulary.

NOTE(review): ``dict()`` accepts at most ONE positional argument, so this call
raises TypeError at runtime; upstream returns a dict holding the three encoders.
'''
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder)
def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int):
'''Map artist names, per-prior genre lists and lyric characters to ids.

Unknown tokens map to 0; each genre list is right-padded with -1 up to n_genres.
Only the first prior receives real lyric ids; the other two get empty lists.
'''
snake_case__ = [self.artists_encoder.get(UpperCamelCase__ , 0) for artist in list_artists]
for genres in range(len(UpperCamelCase__)):
snake_case__ = [self.genres_encoder.get(UpperCamelCase__ , 0) for genre in list_genres[genres]]
snake_case__ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
snake_case__ = [[self.lyrics_encoder.get(UpperCamelCase__ , 0) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Optional[int]):
'''Tokenize lyrics at character granularity (a string becomes a list of chars).'''
return list(UpperCamelCase__)
def __magic_name__ ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , **UpperCamelCase__ : List[str]):
'''Normalize artist/genre/lyrics, then split the lyrics into character tokens.'''
snake_case__ , snake_case__ , snake_case__ = self.prepare_for_tokenization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
snake_case__ = self._tokenize(UpperCamelCase__)
return artist, genre, lyrics
def __magic_name__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool = False):
'''Version-aware normalization of artists, genres and lyrics before encoding.

v3 lowercases; v2 applies ``_normalize`` and appends a ``.v2`` suffix; the
out-of-vocab regex and character set also differ between v2 and v3.
'''
for idx in range(len(self.version)):
if self.version[idx] == "v3":
snake_case__ = artists[idx].lower()
snake_case__ = [genres[idx].lower()]
else:
snake_case__ = self._normalize(artists[idx]) + """.v2"""
snake_case__ = [
self._normalize(UpperCamelCase__) + """.v2""" for genre in genres[idx].split("""_""")
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""")
snake_case__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case__ = {vocab[index]: index + 1 for index in range(len(UpperCamelCase__))}
snake_case__ = 0
snake_case__ = len(UpperCamelCase__) + 1
snake_case__ = self.vocab
snake_case__ = {v: k for k, v in self.vocab.items()}
snake_case__ = """"""
else:
snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""")
snake_case__ = self._run_strip_accents(UpperCamelCase__)
snake_case__ = lyrics.replace("""\\""" , """\n""")
# Strip OOV characters; genre/artist token lists stay empty placeholders here.
snake_case__ = self.out_of_vocab.sub("""""" , UpperCamelCase__), [], []
return artists, genres, lyrics
def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : str):
'''Strip accents: NFD-normalize, then drop combining marks (category Mn).'''
snake_case__ = unicodedata.normalize("""NFD""" , UpperCamelCase__)
snake_case__ = []
for char in text:
snake_case__ = unicodedata.category(UpperCamelCase__)
if cat == "Mn":
continue
output.append(UpperCamelCase__)
return "".join(UpperCamelCase__)
def __magic_name__ ( self : List[str] , UpperCamelCase__ : str):
'''Normalize an artist/genre name: lowercase, map chars outside [a-z0-9.] to "_",
collapse runs of underscores and trim them from both ends.'''
snake_case__ = (
[chr(UpperCamelCase__) for i in range(ord("""a""") , ord("""z""") + 1)]
+ [chr(UpperCamelCase__) for i in range(ord("""A""") , ord("""Z""") + 1)]
+ [chr(UpperCamelCase__) for i in range(ord("""0""") , ord("""9""") + 1)]
+ ["""."""]
)
snake_case__ = frozenset(UpperCamelCase__)
snake_case__ = re.compile(R"""_+""")
snake_case__ = """""".join([c if c in accepted else """_""" for c in text.lower()])
snake_case__ = pattern.sub("""_""" , UpperCamelCase__).strip("""_""")
return text
def __magic_name__ ( self : List[Any] , UpperCamelCase__ : List[str]):
'''Join lyric tokens back into a single space-separated string.'''
return " ".join(UpperCamelCase__)
def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : bool = False):
'''Convert nested python lists to tensors of the requested framework
(TF / PyTorch / JAX / NumPy), optionally prepending a batch axis.'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__):
snake_case__ = TensorType(UpperCamelCase__)
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""")
import tensorflow as tf
snake_case__ = tf.constant
snake_case__ = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""")
import torch
snake_case__ = torch.tensor
snake_case__ = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""")
import jax.numpy as jnp # noqa: F811
snake_case__ = jnp.array
snake_case__ = _is_jax
else:
snake_case__ = np.asarray
snake_case__ = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case__ = [inputs]
if not is_tensor(UpperCamelCase__):
snake_case__ = as_tensor(UpperCamelCase__)
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""")
return inputs
def __call__( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any="" , UpperCamelCase__ : Dict="pt"):
'''Tokenize and encode (artist, genres, lyrics) into one input-id tensor per
prior/version; attention masks are all -inf placeholders.'''
snake_case__ = [0, 0, 0]
snake_case__ = [artist] * len(self.version)
snake_case__ = [genres] * len(self.version)
snake_case__ , snake_case__ , snake_case__ = self.tokenize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
snake_case__ , snake_case__ , snake_case__ = self._convert_token_to_id(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
snake_case__ = [-INFINITY] * len(full_tokens[-1])
snake_case__ = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCamelCase__)
for i in range(len(self.version))
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks})
def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None):
'''Write the three encoder dicts to JSON files in ``save_directory`` and
return their paths.'''
if not os.path.isdir(UpperCamelCase__):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
snake_case__ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""])
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCamelCase__))
snake_case__ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""])
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCamelCase__))
snake_case__ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""])
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCamelCase__))
return (artists_file, genres_file, lyrics_file)
def __magic_name__ ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]):
'''Decode (artist id, genre ids, lyric ids) back to their string tokens via
the reverse maps built in __init__.'''
snake_case__ = self.artists_decoder.get(UpperCamelCase__)
snake_case__ = [self.genres_decoder.get(UpperCamelCase__) for genre in genres_index]
snake_case__ = [self.lyrics_decoder.get(UpperCamelCase__) for character in lyric_index]
return artist, genres, lyrics
# | 654 | 1 |  (dataset-row separator — commented out so it no longer breaks parsing)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule -> exported-symbol map consumed by _LazyModule below. The original
# bound this dict (and the modeling list) to a throwaway name ``a__`` while the
# final _LazyModule call read ``_import_structure`` — a NameError at import time.
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

# The modeling symbols are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports. The original referenced
    # nonexistent mangled modules (``configuration_swinva`` / ``Swinva*``);
    # the correct names appear in the string table above.
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy so the heavy
    # submodules are only imported on first attribute access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# | 654 |  (dataset-row separator — commented out so it no longer breaks parsing)
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    """Resize each image so its shorter edge lies in ``short_edge_length``,
    capping the longer edge at ``max_size``.

    Restores the mangled original: duplicate ``UpperCamelCase__`` parameters
    (a SyntaxError), ``snake_case__`` assignment targets that never bound the
    names later read (``scale``, ``newh``, ``neww``, ``pil_image``, ``img``),
    ``np.uinta`` -> ``np.uint8``, and the class name itself — the ``Preprocess``
    class below instantiates ``ResizeShortestEdge`` by name.
    """

    def __init__(self, short_edge_length, max_size=sys.maxsize):
        # short_edge_length is a [lo, hi] range; a target is sampled per batch.
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        """Resize a list of HWC images (uint8 ndarray via PIL, float tensor via
        torch interpolate -> CHW) and return the list of resized images."""
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            # Scale so the shorter edge becomes `size`.
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            # Shrink further if the longer edge would exceed max_size.
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(neww + 0.5) if False else int(newh + 0.5)  # round half-up
            if img.dtype == np.uint8:
                # uint8 ndarray: resize through PIL, keep HWC layout.
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                # float tensor: hwc -> nchw, bilinear interpolate, back to chw.
                img = img.permute(2, 0, 1).unsqueeze(0)
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs


# Preserve the original (mangled) binding for any caller that used it.
_lowerCAmelCase = ResizeShortestEdge
class Preprocess:
    """Batch preprocessor for the FRCNN visual pipeline: to-tensor, resize the
    shortest edge, normalize, pad to a common size, and report the y/x scale
    factors back to the raw image sizes.

    Restores the mangled original: ``snake_case__`` assignment targets in
    ``__init__`` never bound ``self.<attr>``, and the padding method was renamed
    ``__magic_name__`` although ``__call__`` invokes ``self.pad(...)``.
    """

    def __init__(self, cfg):
        # cfg is the project FRCNN config object (detectron2-style namespaces).
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        """Right/bottom-pad every image to the batch-wide max size.

        Returns a stacked (N, ...) tensor and a tensor of the pre-padding
        (h, w) sizes taken from each image's last two dims.
        """
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            # nn.functional.pad takes (left, right, top, bottom) for the last two dims.
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        """Preprocess one image or a list of images.

        Returns (padded_images, sizes, scales_yx); with ``single_image`` the
        leading batch dimension is stripped from each result.
        """
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                # Convert each entry in place to a float tensor on the target device.
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # scale factors mapping padded/resized coordinates back to raw sizes
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


# Preserve the original (mangled) binding for any caller that used it.
_lowerCAmelCase = Preprocess
def _UpperCAmelCase(boxes, scale_yx):
    """Rescale (x1, y1, x2, y2) boxes in place by per-row (y, x) scale factors.

    Columns 0 and 2 are x coordinates (scaled by scale_yx[:, 1]); columns 1 and
    3 are y coordinates (scaled by scale_yx[:, 0]).  Returns the mutated tensor.

    NOTE(review): the original declared both parameters as `a` (a duplicate-name
    SyntaxError) while the body read `boxes`/`scale_yx`; restored the names the
    body actually uses (originally `_scale_box`).
    """
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _UpperCAmelCase(tensor, box_size):
    """Clamp (x1, y1, x2, y2) boxes in place to lie inside a (height, width) image.

    x coordinates (columns 0, 2) are clamped to [0, w]; y coordinates
    (columns 1, 3) to [0, h].  Raises AssertionError on non-finite input.

    NOTE(review): the original declared both parameters as `a` (duplicate-name
    SyntaxError) while the body read `tensor`/`box_size`, and the unpacked
    `h, w` were lost to placeholder assignments; restored (originally `_clip_box`).
    """
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
| 654 | 1 |
from ...utils import is_torch_available, is_transformers_available

# The VQ-Diffusion pipeline needs both optional dependencies: `transformers`
# (text encoder) and `torch`; only expose it when both are installed.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 654 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCAmelCase(PretrainedConfig):
    """Configuration class for WavLM models.

    NOTE(review): the original declared every ``__init__`` parameter as
    ``UpperCamelCase__`` (a duplicate-name SyntaxError) while the body read the
    real names, and the base class ``lowercase_`` was undefined.  Restored the
    parameter names the body reads (this matches ``transformers.WavLMConfig``)
    and the imported ``PretrainedConfig`` base.  Defaults preserved verbatim.
    """

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # the three conv descriptions must agree in length (one entry per feature-extractor layer)
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Product of the conv strides: how many input samples map to one logit frame."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 654 | 1 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
a__ = 5
a__ = 1_0
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
_lowercase : Tuple = SpeechaTextTokenizer
_lowercase : Tuple = False
_lowercase : Optional[int] = True
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
super().setUp()
snake_case__ = sp.SentencePieceProcessor()
spm_model.Load(UpperCamelCase__)
snake_case__ = ["""<s>""", """<pad>""", """</s>""", """<unk>"""]
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(UpperCamelCase__))]
snake_case__ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__))))
snake_case__ = Path(self.tmpdirname)
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab_file"""])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""spm_file"""])
snake_case__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def __magic_name__ ( self : List[str]):
'''simple docstring'''
snake_case__ = """<pad>"""
snake_case__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__) , UpperCamelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__) , UpperCamelCase__)
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
snake_case__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """j""")
self.assertEqual(len(UpperCamelCase__) , 1_0_0_1)
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_1)
def __magic_name__ ( self : List[str]):
'''simple docstring'''
snake_case__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
snake_case__ = tokenizer.tokenize("""This is a test""")
self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__) , [2_8_9, 5_0, 1_4, 1_7_4, 3_8_6] , )
snake_case__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
UpperCamelCase__ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
snake_case__ = tokenizer.convert_tokens_to_ids(UpperCamelCase__)
self.assertListEqual(UpperCamelCase__ , [1_2, 2_5, 8_8, 5_9, 2_8, 2_3, 1_1, 4, 6_0_6, 3_5_1, 3_5_1, 3_5_1, 7, 1_6, 7_0, 5_0, 7_6, 8_4, 1_0, 4, 8])
snake_case__ = tokenizer.convert_ids_to_tokens(UpperCamelCase__)
self.assertListEqual(
UpperCamelCase__ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def __magic_name__ ( self : str):
'''simple docstring'''
snake_case__ = {"""input_ids""": [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""facebook/s2t-small-mustc-en-de-st""" , revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" , )
@require_sentencepiece
class _lowerCAmelCase(unittest.TestCase):
    """Integration tests for the multilingual Speech2Text tokenizer checkpoint.

    NOTE(review): every method was named `__magic_name__` (so later defs
    shadowed earlier ones and pytest discovered none of them) and class
    attributes/locals were mangled away while the bodies read the real names
    (`cls.checkpoint_name`, `self.french_text`, ...).  Restored.  `FR_CODE` and
    `ES_CODE` are the module-level language-code constants defined above
    (mangled there to `a__ = 5` / `a__ = 1_0`) — confirm they are restored too.
    """

    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        # download once per class; tests below only read from the tokenizer
        cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 654 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase(DiffusionPipeline):
    """Unconditional image generation with the score-based SDE-VE sampler.

    Each sampler step runs `correct_steps` Langevin correction sub-steps
    followed by one predictor step.  NOTE(review): the original bound every
    local to `snake_case__` while reading `shape`, `model`, `sample`, ... and
    inherited from the undefined name `lowercase_`; restored (this is
    diffusers' `ScoreSdeVePipeline`).
    """

    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Sample `batch_size` images; returns ImagePipelineOutput (or a tuple)."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # report the noise-free mean of the final step, mapped to [0, 1] NHWC numpy
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
| 654 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
a__ = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class _lowerCAmelCase(PretrainedConfig):
    """Configuration class for ALBERT models (defaults correspond to albert-xxlarge-v2).

    NOTE(review): all ``__init__`` parameters were named ``UpperCamelCase__``
    (duplicate-name SyntaxError) while the body read the real attribute names,
    and the base class ``lowercase_`` was undefined.  Restored to the names the
    body reads (matches ``transformers.AlbertConfig``); defaults preserved.
    """

    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class _lowerCAmelCase(OnnxConfig):
    """ONNX export configuration for ALBERT.

    NOTE(review): the property was named `__magic_name__` and its local
    `dynamic_axis` was never bound (placeholder assignment); restored the
    `inputs` property name that the `OnnxConfig` base class expects.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # multiple-choice inputs carry an extra `choice` axis between batch and sequence
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 654 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for IFInpaintingSuperResolutionPipeline.

    NOTE(review): the mixin-required attribute names (`pipeline_class`,
    `params`, ...) were mangled to `_lowercase`, every method was named
    `__magic_name__` (later defs shadowed earlier ones), and `get_dummy_inputs`
    had duplicate `UpperCamelCase__` parameters (SyntaxError).  Restored the
    names `PipelineTesterMixin` reads and the `test_*` names pytest discovers.
    """

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # mps has no per-device generator; fall back to the global torch seed
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # was `test_save_load_floataa` — mangled digit; the base-class hook is `test_save_load_float16`
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 654 | 1 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
# `torch.onnx.export` dropped the `use_external_data_format` / `enable_onnx_checker`
# arguments in v1.11; remember which side of that boundary we are on.
# NOTE(review): was assigned to the placeholder `a__` while `onnx_export` below
# reads `is_torch_less_than_1_11`; restored the read name.
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export `model` to ONNX at `output_path`, creating parent directories.

    NOTE(review): the original declared every parameter as `a` (duplicate-name
    SyntaxError); restored names from the call site below and torch.onnx.export.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def _UpperCAmelCase ( a : str , a : str , a : int , a : bool = False ):
snake_case__ = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
snake_case__ = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
snake_case__ = """cpu"""
snake_case__ = Path(a )
# VAE DECODER
snake_case__ = AutoencoderKL.from_pretrained(model_path + """/vae""" )
snake_case__ = vae_decoder.config.latent_channels
# forward only through the decoder part
snake_case__ = vae_decoder.decode
onnx_export(
a , model_args=(
torch.randn(1 , a , 25 , 25 ).to(device=a , dtype=a ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=a , )
del vae_decoder
if __name__ == "__main__":
    # NOTE(review): the parser/args results were assigned to the placeholder
    # `a__` while the following lines read `parser` / `args`; restored.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )

    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )

    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    # `args.fpaa` in the original is a mangled read of the `--fp16` flag
    convert_models(args.model_path, args.output_path, args.opset, args.fpaa if hasattr(args, "fpaa") else args.fp16)
    print("SD: Done: ONNX")
| 654 |
# Parity pools for the recursive search (Project Euler 145).
# NOTE(review): both lists were assigned to the same placeholder `a__` while
# `reversible_numbers` reads EVEN_DIGITS / ODD_DIGITS; restored those names.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count length-`length` reversible numbers, filling `digits` from the outside in.

    A number n is reversible when n + reverse(n) has only odd digits.  Digits are
    chosen in symmetric pairs (outermost first); `remainder` carries the running
    column sum.  `remaining_length` is how many digit positions are still open.

    NOTE(review): the recursive calls already used this name while the def was
    mangled to `_UpperCAmelCase`, and the indexed writes into `digits` were
    collapsed to bare placeholder assignments; both restored.
    """
    if remaining_length == 0:
        # all digits placed: reject leading/trailing zero, then verify every
        # column sum of n + reverse(n) is odd
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        # middle digit of an odd-length number: the pair sum 2*digit is even,
        # so the incoming carry must already be odd
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        # pick the partner digit with the parity that keeps this column sum odd
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers exist below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 654 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import table for the RoFormer package: module name -> public symbols.
# NOTE(review): the dict and the final module assignment were bound to the
# placeholder `a__` while `_LazyModule` is called with `_import_structure` and
# the result must replace `sys.modules[__name__]`; both restored.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
# Mapping from human-readable language names to NLLB-200 (FLORES-200) codes.
# Referenced by the translation tool below as `lang_to_code`.
LANGUAGE_CODES = {
    "Acehnese Arabic": "ace_Arab",
    "Acehnese Latin": "ace_Latn",
    "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab",
    "Tunisian Arabic": "aeb_Arab",
    "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab",
    "Akan": "aka_Latn",
    "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab",
    "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn",
    "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab",
    "Egyptian Arabic": "arz_Arab",
    "Assamese": "asm_Beng",
    "Asturian": "ast_Latn",
    "Awadhi": "awa_Deva",
    "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab",
    "North Azerbaijani": "azj_Latn",
    "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn",
    "Balinese": "ban_Latn",
    "Belarusian": "bel_Cyrl",
    "Bemba": "bem_Latn",
    "Bengali": "ben_Beng",
    "Bhojpuri": "bho_Deva",
    "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn",
    "Standard Tibetan": "bod_Tibt",
    "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn",
    "Bulgarian": "bul_Cyrl",
    "Catalan": "cat_Latn",
    "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn",
    "Chokwe": "cjk_Latn",
    "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn",
    "Welsh": "cym_Latn",
    "Danish": "dan_Latn",
    "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn",
    "Dyula": "dyu_Latn",
    "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek",
    "English": "eng_Latn",
    "Esperanto": "epo_Latn",
    "Estonian": "est_Latn",
    "Basque": "eus_Latn",
    "Ewe": "ewe_Latn",
    "Faroese": "fao_Latn",
    "Fijian": "fij_Latn",
    "Finnish": "fin_Latn",
    "Fon": "fon_Latn",
    "French": "fra_Latn",
    "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn",
    "Scottish Gaelic": "gla_Latn",
    "Irish": "gle_Latn",
    "Galician": "glg_Latn",
    "Guarani": "grn_Latn",
    "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn",
    "Hausa": "hau_Latn",
    "Hebrew": "heb_Hebr",
    "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva",
    "Croatian": "hrv_Latn",
    "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn",
    "Igbo": "ibo_Latn",
    "Ilocano": "ilo_Latn",
    "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn",
    "Italian": "ita_Latn",
    "Javanese": "jav_Latn",
    "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn",
    "Jingpho": "kac_Latn",
    "Kamba": "kam_Latn",
    "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab",
    "Kashmiri Devanagari": "kas_Deva",
    "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab",
    "Central Kanuri Latin": "knc_Latn",
    "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn",
    "Kabuverdianu": "kea_Latn",
    "Khmer": "khm_Khmr",
    "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn",
    "Kyrgyz": "kir_Cyrl",
    "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn",
    "Kikongo": "kon_Latn",
    "Korean": "kor_Hang",
    "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn",
    "Limburgish": "lim_Latn",
    "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn",
    "Lombard": "lmo_Latn",
    "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn",
    "Luba-Kasai": "lua_Latn",
    "Ganda": "lug_Latn",
    "Luo": "luo_Latn",
    "Mizo": "lus_Latn",
    "Standard Latvian": "lvs_Latn",
    "Magahi": "mag_Deva",
    "Maithili": "mai_Deva",
    "Malayalam": "mal_Mlym",
    "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab",
    "Minangkabau Latin": "min_Latn",
    "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn",
    "Maltese": "mlt_Latn",
    "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl",
    "Mossi": "mos_Latn",
    "Maori": "mri_Latn",
    "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn",
    "Norwegian Nynorsk": "nno_Latn",
    "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva",
    "Northern Sotho": "nso_Latn",
    "Nuer": "nus_Latn",
    "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn",
    "West Central Oromo": "gaz_Latn",
    "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn",
    "Eastern Panjabi": "pan_Guru",
    "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab",
    "Polish": "pol_Latn",
    "Portuguese": "por_Latn",
    "Dari": "prs_Arab",
    "Southern Pashto": "pbt_Arab",
    "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn",
    "Rundi": "run_Latn",
    "Russian": "rus_Cyrl",
    "Sango": "sag_Latn",
    "Sanskrit": "san_Deva",
    "Santali": "sat_Olck",
    "Sicilian": "scn_Latn",
    "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh",
    "Slovak": "slk_Latn",
    "Slovenian": "slv_Latn",
    "Samoan": "smo_Latn",
    "Shona": "sna_Latn",
    "Sindhi": "snd_Arab",
    "Somali": "som_Latn",
    "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn",
    "Tosk Albanian": "als_Latn",
    "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl",
    "Swati": "ssw_Latn",
    "Sundanese": "sun_Latn",
    "Swedish": "swe_Latn",
    "Swahili": "swh_Latn",
    "Silesian": "szl_Latn",
    "Tamil": "tam_Taml",
    "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu",
    "Tajik": "tgk_Cyrl",
    "Tagalog": "tgl_Latn",
    "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi",
    "Tamasheq Latin": "taq_Latn",
    "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn",
    "Tswana": "tsn_Latn",
    "Tsonga": "tso_Latn",
    "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn",
    "Turkish": "tur_Latn",
    "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng",
    "Uyghur": "uig_Arab",
    "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn",
    "Urdu": "urd_Arab",
    "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn",
    "Vietnamese": "vie_Latn",
    "Waray": "war_Latn",
    "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn",
    "Eastern Yiddish": "ydd_Hebr",
    "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant",
    "Chinese Simplified": "zho_Hans",
    "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn",
    "Zulu": "zul_Latn",
}
class _lowerCAmelCase(lowercase_):
    """Agent tool that translates text between NLLB-200 languages.

    `src_lang` / `tgt_lang` are plain-English language names looked up in
    `LANGUAGE_CODES`; the checkpoint is the distilled NLLB-200 600M model.
    """

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        """Validate the language names and build tokenized model inputs."""
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        """Run generation on the pre-processed inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the first generated sequence back to text."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 654 | 1 |
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config (random weights, not pretrained weights)
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 654 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return a representative linear layer from *model*'s first transformer block.

    Used by the quantization tests below to inspect the weight class/dtype of a
    quantized layer.  GPT-2 names its MLP up-projection `c_fc`; the fallback
    path follows this file's naming for the BLOOM-style MLP down-projection.
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    # NOTE(review): attribute name kept consistent with the rest of this file;
    # confirm it matches the actual module attribute on the loaded model.
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wrap a linear module with a trainable low-rank adapter.

    The adapter is a rank-`rank` bottleneck (in_features -> rank ->
    out_features) whose second matrix is zero-initialised, so at creation time
    the wrapper computes exactly the same function as the wrapped module; only
    the adapter is meant to be trained.
    """

    def __init__(self, module, rank):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        # Scaled-normal init for the down-projection; keeps adapter output small.
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        # Zero init: the adapter contributes nothing until it is trained.
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, x, *args, **kwargs):
        """Frozen base-module output plus low-rank adapter output."""
        return self.module(x, *args, **kwargs) + self.adapter(x)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase(unittest.TestCase):
    """Shared fixtures for the 4-bit quantization test-suite.

    Subclasses read `model_name`, `input_text`, `EXPECTED_OUTPUTS`,
    `EXPECTED_RELATIVE_DIFFERENCE` and `MAX_NEW_TOKENS`.  Several generations
    are accepted because outputs may legitimately differ across hardware and
    library versions.
    """

    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109_6595_5269_2574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Tokenizer is shared by every subclass.
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class _lowerCAmelCase(lowercase_):
    """End-to-end checks for a causal LM loaded with 4-bit quantization."""

    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.floataa, device_map="auto"
        )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")

    def tearDown(self):
        # Free GPU memory between tests.
        del self.model_fpaa
        del self.model_abit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        """The quantized model's config must expose and serialize its quantization config."""
        config = self.model_abit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        """Quantization must shrink memory by the expected ratio and swap the weight class."""
        from bitsandbytes.nn import Paramsabit

        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()

        self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Paramsabit)

    def test_linear_are_abit(self):
        """Every linear layer (except kept-in-fp32 modules and the LM head) is quantized."""
        from transformers import TaPreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()

        for name, module in self.model_abit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta)

    def test_generate_quality(self):
        """A quantized model still generates one of the known-good continuations."""
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        """Same quality check, but loading through an explicit BitsAndBytesConfig."""
        config = BitsAndBytesConfig()
        # NOTE(review): attribute name follows this file's digit-stripped
        # convention; confirm against the installed transformers version.
        config.load_in_abit = True

        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        """Serializing a 4-bit model is unsupported and must raise."""
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_abit(self):
        """Passing both a quantization config and the load flag is ambiguous and must raise."""
        config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=config,
                load_in_abit=True,
                device_map="auto",
                bnb_abit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        """Casting or moving a quantized model must fail; the fp model stays movable."""
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_abit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_abit.to(torch.floataa)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            self.model_abit.float()

        with self.assertRaises(ValueError):
            self.model_abit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fpaa = self.model_fpaa.to(torch.floataa)
        _ = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fpaa.to("cpu")

        # Check this does not throw an error
        _ = self.model_fpaa.half()

        # Check this does not throw an error
        _ = self.model_fpaa.float()

    def test_fpaa_abit_conversion(self):
        """Modules listed in `_keep_in_fpaa_modules` stay in full precision."""
        model = AutoModelForSeqaSeqLM.from_pretrained("t5-small", load_in_abit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase(unittest.TestCase):
    """4-bit inference checks for T5, with and without kept-in-fp32 modules."""

    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fpaa(self):
        """Inference works even when the keep-in-fp32 module list is disabled."""
        from transformers import TaForConditionalGeneration

        modules = TaForConditionalGeneration._keep_in_fpaa_modules
        TaForConditionalGeneration._keep_in_fpaa_modules = None

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # Restore the class attribute for the other tests.
        TaForConditionalGeneration._keep_in_fpaa_modules = modules

    def test_inference_with_keep_in_fpaa(self):
        """Default loading keeps listed modules in fp32 (bnb Linear4bit elsewhere)."""
        import bitsandbytes as bnb

        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linearabit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class _lowerCAmelCase(lowercase_):
    """4-bit loading across the different auto-model head classes."""

    def setUp(self):
        super().setUp()

        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_abit=True, device_map="auto"
        )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_abit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        """Transformer body is quantized; task heads remain plain Parameters."""
        from bitsandbytes.nn import Paramsabit

        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class _lowerCAmelCase(lowercase_):
    """4-bit loading through the high-level `pipeline` API."""

    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _lowerCAmelCase(lowercase_):
    """4-bit loading sharded across two GPUs."""

    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_abit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class _lowerCAmelCase(lowercase_):
    """Adapter training on a frozen 4-bit model (requires bitsandbytes >= 0.37.0)."""

    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                # Adapters received gradients; frozen embeddings did not.
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class _lowerCAmelCase(lowercase_):
    """Re-runs the inherited 4-bit test-suite against GPT-2 XL."""

    model_name = "gpt2-xl"
    # Expected fp16/4-bit memory-footprint ratio for this architecture.
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191_8548_5415_2187
| 654 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase(lowercase_, lowercase_, unittest.TestCase):
    """Fast tests for the IF inpainting super-resolution pipeline."""

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_floataa(self):
        # Looser tolerance: half precision accumulates rounding error.
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 654 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
a__ = """"""
a__ = """"""
a__ = """"""
a__ = 1 # (0 is vertical, 1 is horizontal)
def main():
    """Flip every dataset image (and its YOLO annotations) and save the results."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        # NOTE(review): the leading "/" makes this an absolute path rooted at
        # the filesystem root — confirm this is the intended output location.
        cva.imwrite(f"/{file_root}.jpg", image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str):
    """Load YOLO-format labels and matching image paths.

    Each `<name>.txt` in `label_dir` holds lines of
    `class x_center y_center width height`; the paired image is
    `<name>.jpg` in `img_dir`.  Returns (img_paths, labels) where labels[i]
    is the list of [class, x, y, w, h] boxes for img_paths[i].  Label files
    with no boxes are skipped entirely.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    """Flip each image and mirror its normalized YOLO boxes.

    flip_type 1 = horizontal (mirror x centers), 0 = vertical (mirror y
    centers).  Returns (new_images, new_annos_lists, path_list) aligned by
    index with the inputs.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # Coordinates are normalized to [0, 1], so mirroring is 1 - x.
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    """Return a random string of `number_char` lowercase letters and digits.

    Used to generate unique suffixes for augmented file names.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 654 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( lowercase_ , unittest.TestCase ):
    """Tokenization tests for OpenAI GPT.

    Writes a tiny BPE vocabulary/merges pair to disk in setUp, then checks
    that the slow (Python) and fast (Rust) tokenizers tokenize, convert ids,
    and reject "max_length" padding (no pad token) consistently.

    NOTE(review): identifiers (`_lowerCAmelCase`, `lowercase_`,
    `__magic_name__`, `snake_case__`, `UpperCamelCase__`) look
    machine-mangled, and several `snake_case__ = ...` bindings are later read
    under different names (e.g. `self.vocab_file`, `tokenizer_r`) — confirm
    against the original test module before relying on runtime behavior.
    """

    # Tokenizer classes exercised by the shared mixin (presumably
    # tokenizer_class / rust_tokenizer_class / test_rust_tokenizer flags).
    _lowercase : List[Any] = OpenAIGPTTokenizer
    _lowercase : Optional[Any] = OpenAIGPTTokenizerFast
    _lowercase : str = True
    _lowercase : Dict = False

    def __magic_name__ ( self : Any):
        '''setUp: write a minimal BPE vocab and merges file into the temp dir.'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case__ = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """w</w>""",
            """r</w>""",
            """t</w>""",
            """lo""",
            """low""",
            """er</w>""",
            """low</w>""",
            """lowest</w>""",
            """newer</w>""",
            """wider</w>""",
            """<unk>""",
        ]
        snake_case__ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__))))
        snake_case__ = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
        snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file , """w""") as fp:
            fp.write(json.dumps(UpperCamelCase__))
        with open(self.merges_file , """w""") as fp:
            fp.write("""\n""".join(UpperCamelCase__))

    def __magic_name__ ( self : Any , UpperCamelCase__ : Dict):
        '''Return an (input, expected) text pair used by the shared mixin tests.'''
        return "lower newer", "lower newer"

    def __magic_name__ ( self : int):
        '''Check full tokenization: text -> BPE tokens -> token ids.'''
        snake_case__ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file)
        snake_case__ = """lower"""
        snake_case__ = ["""low""", """er</w>"""]
        snake_case__ = tokenizer.tokenize(UpperCamelCase__)
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = tokens + ["""<unk>"""]
        snake_case__ = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__) , UpperCamelCase__)

    def __magic_name__ ( self : Optional[Any] , UpperCamelCase__ : Optional[Any]=1_5):
        '''Padding with "max_length" must raise for tokenizers lacking a pad token.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                snake_case__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
                # Simple input
                snake_case__ = """This is a simple input"""
                snake_case__ = ["""This is a simple input 1""", """This is a simple input 2"""]
                snake_case__ = ("""This is a simple input""", """This is a pair""")
                snake_case__ = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""")
                # Simple input
                self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""")
                # Simple input
                self.assertRaises(
                    UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" , )
                # Pair input
                self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""")
                # Pair input
                self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""")
                # Pair input
                self.assertRaises(
                    UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" , )

    def __magic_name__ ( self : Dict):
        '''Intentionally disabled shared test (no-op override).'''
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class _lowerCAmelCase ( lowercase_ ):
    """Rerun of the tokenizer suite gated on ftfy/spacy being installed.

    NOTE(review): the class name is machine-mangled; presumably this is the
    "with ftfy" variant of the OpenAI GPT tokenization tests, inheriting all
    test methods unchanged from the base suite — confirm against the original.
    """

    pass
| 654 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# Number of synthetic examples used by every benchmark in this file
# (read by the benchmark driver below). The original bound all three of
# these constants to the throwaway name `a__`, so the later reads of
# SPEED_TEST_N_EXAMPLES / RESULTS_BASEPATH / RESULTS_FILENAME /
# RESULTS_FILE_PATH raised NameError.
SPEED_TEST_N_EXAMPLES = 500_000
# Results are written next to this script under ./results/<script-name>.json.
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    """Run a single `Dataset.map` call so `@get_duration` can time it.

    The original declared both parameters as `a` (a SyntaxError); the names
    are restored from the body. Deliberately shadows the `map` builtin —
    the benchmark driver below calls it as `map(dataset, ...)`.

    NOTE(review): `get_duration` comes from the local `utils` module and
    presumably returns the elapsed wall-clock time — confirm there.
    """
    _ = dataset.map(**kwargs)
@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    """Run a single `Dataset.filter` call so `@get_duration` can time it.

    The original declared both parameters as `a` (a SyntaxError); the names
    are restored from the body. Deliberately shadows the `filter` builtin —
    the benchmark driver below calls it as `filter(dataset)`.
    """
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    """Benchmark `Dataset.map`/`Dataset.filter` over a generated dataset and
    dump the timings as JSON to RESULTS_FILE_PATH.

    Renamed from a mangled placeholder to match the `benchmark_map_filter()`
    call in this module's `__main__` guard.

    NOTE(review): the local bindings and the timing-dict keys were destroyed
    by machine mangling; they are restored here from the upstream
    `datasets` benchmark script — verify the key strings before comparing
    against historical results files.
    """
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            # Tokenize the raw text column; used by the fast-tokenizer run below.
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
    # Runs the full map/filter benchmark suite and writes the timings JSON.
    # NOTE(review): `benchmark_map_filter` must be the (mangled) benchmark
    # driver defined above — confirm the definition carries this name.
    benchmark_map_filter()
| 654 | 1 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger used throughout the conversion functions below.
# The original bound it to the throwaway name `a__`, so every `logger.info`
# call in this module raised NameError.
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    """Load weights from a TensorFlow 2.x BERT checkpoint into a PyTorch model.

    Renamed from a mangled placeholder to match the `load_tfa_weights_in_bert`
    call in the conversion entry point of this module; the clobbered local
    bindings (`snake_case__ = ...`) are restored from how the body reads them.

    :param model: the target PyTorch `BertModel` whose parameters are filled in-place.
    :param tf_checkpoint_path: path to the TF 2.x checkpoint.
    :param config: `BertConfig` of the target model (used for layer counting).
    :return: the populated `model`.
    :raises ValueError: on unexpected checkpoint layout or shape mismatches.
    """
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads.")

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # NOTE(review): duplicate of the `_output_layer_norm` branch
                # above, hence unreachable; kept to mirror the original code.
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}")
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """Convert a TF 2.x BERT checkpoint into a saved PyTorch state dict.

    Renamed from a mangled placeholder to match the
    `convert_tfa_checkpoint_to_pytorch(...)` call in this module's
    `__main__` guard; the clobbered local bindings are restored from the
    log messages, which already named `config_path` etc.

    :param tf_checkpoint_path: path to the TF 2.x checkpoint.
    :param config_path: path to the BERT config JSON file.
    :param pytorch_dump_path: output file for `torch.save` (must include filename).
    """
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)
    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point. The original bound the parser and the parsed args to
    # the throwaway name `a__`, so `parser.add_argument(...)` and
    # `args.tf_checkpoint_path` raised NameError; the bindings are restored.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 654 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger, renamed from the meaningless machine name `a__` to
# the conventional `logger` used by transformers conversion scripts.
# NOTE(review): no `logger.` call is visible in this chunk — confirm nothing
# elsewhere read the old name before relying on this rename.
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the (timm key, HF DeiT key) rename table for a checkpoint conversion.

    Renamed from a mangled placeholder to match the `create_rename_keys(...)`
    call site below. The original declared both parameters as ``a`` (a
    SyntaxError) and never initialized `rename_keys`; names are restored from
    how the body reads them.

    :param config: model config; only `config.num_hidden_layers` is read.
    :param base_model: if True, emit keys for a headless base model and strip
        the leading "deit" from the destination keys.
    :return: list of (source_key, destination_key) tuples.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit".
        # NOTE(review): [4:] strips only 4 characters of the 5-char "deit." prefix,
        # leaving a leading "." — preserved as-is from the original; confirm upstream.
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate HF q/k/v entries, in place.

    Renamed from a mangled placeholder to match the `read_in_q_k_v(...)`
    call site below; the clobbered assignment targets are restored so the
    sliced tensors actually land in `state_dict` under the HF key names.

    :param state_dict: checkpoint dict, mutated in place.
    :param config: model config; reads `num_hidden_layers` and `hidden_size`.
    :param base_model: if True, destination keys carry no "deit." prefix.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    """Move the value stored under `old` to key `new` in `dct`, in place.

    Renamed from a mangled placeholder to match the `rename_key(...)` call
    site below; the original discarded the popped value instead of storing
    it back under the new key.
    """
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download and return the standard COCO cats test image as a PIL Image.

    Renamed from a mangled placeholder to match the `prepare_img()` call
    site below; the original passed the undefined name `a` to
    `requests.get` and as `stream=`, guaranteeing a NameError.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read straight from the HTTP response body
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy a timm DeiT checkpoint into an HF `DeiTForImageClassificationWithTeacher`,
    verify the logits match, and save model + image processor.

    Renamed from a mangled placeholder to match the `convert_deit_checkpoint(...)`
    call in this module's `__main__` guard. The clobbered local bindings
    (`snake_case__ = ...`) are restored from how the body reads them; config
    attribute names are restored from the upstream conversion script —
    confirm against it before shipping.

    :param deit_name: timm model name, e.g. "vit_deit_base_distilled_patch16_224"
        (patch size and image size are parsed out of this string).
    :param pytorch_dump_folder_path: output directory for `save_pretrained`.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        # base size is DeiTConfig's default — nothing to override
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point. The original bound the parser and the parsed args to
    # the throwaway name `a__`, so `parser.add_argument(...)` and
    # `args.deit_name` raised NameError; the bindings are restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 654 | 1 |
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read a file as bytes and return its contents as one long bit-string.

    Renamed from a mangled placeholder to match the `read_file_binary(...)`
    call site in `compress` below; the clobbered locals are restored so the
    per-byte bit patterns actually accumulate into the result.

    :param file_path: path of the file to read.
    :return: concatenation of each byte rendered as 8 binary digits.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        # best-effort CLI behavior: report and abort the whole program
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    """Grow the LZ lexicon after emitting a code, in place.

    Renamed from a mangled placeholder to match the `add_key_to_lexicon(...)`
    call site in `compress_data`; the clobbered assignment targets and the
    mangled `math.loga` call are restored.

    Replaces `curr_string` with its two one-bit extensions; when `index`
    reaches a power of two, every stored code gains a leading "0" so all
    codes keep the same width.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        # code width grew by one bit — left-pad every existing code
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """LZ-compress a bit-string, returning the concatenated output codes.

    Renamed from a mangled placeholder to match the `compress_data(...)`
    call site in `compress` below; the clobbered locals are restored so the
    lexicon, index and result actually update.

    :param data_bits: input as a string of "0"/"1" characters.
    :return: compressed bit-string.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    # flush a trailing partial match by padding with zeros until it hits a code
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the source file's length (self-delimiting binary) to the payload.

    Renamed from a mangled placeholder to match the `add_file_length(...)`
    call site in `compress` below. The header is the file size in binary,
    preceded by one fewer "0" than its digit count, so the decoder can
    recover both the length and where the payload starts.
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack a bit-string into bytes and write them to `file_path`.

    Renamed from a mangled placeholder to match the `write_file_binary(...)`
    call site in `compress` below; the clobbered locals are restored.

    The last chunk is terminated with a "1" followed by zero-padding (or a
    whole "10000000" byte when the data is byte-aligned) so the decoder can
    strip the padding unambiguously.

    NOTE(review): an empty `to_write` raises IndexError on the `[-1]` access —
    preserved from the original; confirm callers never pass an empty string.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """Full pipeline: read a file as bits, LZ-compress, prepend length, write bytes.

    Renamed from a mangled placeholder to match the `compress(...)` call in
    this module's `__main__` guard; the clobbered intermediate bindings are
    restored so each stage feeds the next.
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
    # CLI usage: python <script> <source_file> <destination_file>
    # NOTE(review): relies on a function named `compress` being defined
    # above — confirm the (mangled) definition actually carries that name.
    compress(sys.argv[1], sys.argv[2])
| 654 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( lowercase_ ):
    """Model output holding a single tensor.

    NOTE(review): names are machine-mangled; the transformer's forward pass
    below returns `PriorTransformerOutput(predicted_image_embedding=...)`,
    so this is presumably that class with a
    `predicted_image_embedding: torch.FloatTensor` field — confirm the field
    name before relying on it.
    """

    # The predicted CLIP image embedding produced by the prior.
    _lowercase : torch.FloatTensor
class _lowerCAmelCase ( lowercase_ , lowercase_ ):
    """Prior transformer: a decoder-style transformer that maps a text
    embedding plus a noisy latent (and a timestep) to a predicted CLIP image
    embedding, as used by unCLIP/Kandinsky-style prior models.

    NOTE(review): identifiers (`_lowerCAmelCase`, `lowercase_`,
    `__magic_name__`, `snake_case__`, `UpperCamelCase__`) look
    machine-mangled, and most `snake_case__ = ...` bindings are later read
    under different names (e.g. `self.time_proj`, `pointer`-style attributes,
    `hidden_states`) — confirm against the original diffusers
    `PriorTransformer` before relying on runtime behavior.
    """

    @register_to_config
    def __init__( self : Tuple , UpperCamelCase__ : int = 3_2 , UpperCamelCase__ : int = 6_4 , UpperCamelCase__ : int = 2_0 , UpperCamelCase__ : int = 7_6_8 , UpperCamelCase__ : Optional[Any]=7_7 , UpperCamelCase__ : str=4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = "linear" , UpperCamelCase__ : Optional[str] = "prd" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , ):
        '''Build timestep/embedding projections, the transformer stack, the
        causal attention mask buffer, and the CLIP mean/std parameters.
        Parameter names are mangled; presumably (num_attention_heads,
        attention_head_dim, num_layers, embedding_dim, num_embeddings,
        additional_embeddings, dropout, time_embed_act_fn, norm_in_type,
        embedding_proj_norm_type, encoder_hid_proj_type, added_emb_type,
        time_embed_dim, embedding_proj_dim, clip_embed_dim) — confirm.'''
        super().__init__()
        snake_case__ = num_attention_heads
        snake_case__ = attention_head_dim
        # total transformer width = heads * head_dim
        snake_case__ = num_attention_heads * attention_head_dim
        snake_case__ = additional_embeddings
        snake_case__ = time_embed_dim or inner_dim
        snake_case__ = embedding_proj_dim or embedding_dim
        snake_case__ = clip_embed_dim or embedding_dim
        snake_case__ = Timesteps(UpperCamelCase__ , UpperCamelCase__ , 0)
        snake_case__ = TimestepEmbedding(UpperCamelCase__ , UpperCamelCase__ , out_dim=UpperCamelCase__ , act_fn=UpperCamelCase__)
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        if embedding_proj_norm_type is None:
            snake_case__ = None
        elif embedding_proj_norm_type == "layer":
            snake_case__ = nn.LayerNorm(UpperCamelCase__)
        else:
            raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''')
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        if encoder_hid_proj_type is None:
            snake_case__ = None
        elif encoder_hid_proj_type == "linear":
            snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        else:
            raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''')
        # learned positional embedding over all tokens (sequence + extras)
        snake_case__ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase__))
        if added_emb_type == "prd":
            snake_case__ = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase__))
        elif added_emb_type is None:
            snake_case__ = None
        else:
            raise ValueError(
                F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''')
        snake_case__ = nn.ModuleList(
            [
                BasicTransformerBlock(
                    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn="""gelu""" , attention_bias=UpperCamelCase__ , )
                for d in range(UpperCamelCase__)
            ])
        if norm_in_type == "layer":
            snake_case__ = nn.LayerNorm(UpperCamelCase__)
        elif norm_in_type is None:
            snake_case__ = None
        else:
            raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''')
        snake_case__ = nn.LayerNorm(UpperCamelCase__)
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        # additive causal mask: -10000 above the diagonal blocks attention to future tokens
        snake_case__ = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0)
        causal_attention_mask.triu_(1)
        snake_case__ = causal_attention_mask[None, ...]
        self.register_buffer("""causal_attention_mask""" , UpperCamelCase__ , persistent=UpperCamelCase__)
        # CLIP statistics used by the latent un-normalization helper below
        snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
        snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def __magic_name__ ( self : Optional[int]):
        '''Return every attention processor in the model, keyed by its module path.'''
        snake_case__ = {}

        def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor]):
            # any module exposing set_processor participates in the registry
            if hasattr(UpperCamelCase__ , """set_processor"""):
                snake_case__ = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        return processors

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        '''Install one processor on every attention module, or a per-path dict
        of processors (which must cover every attention layer).'''
        snake_case__ = len(self.attn_processors.keys())
        if isinstance(UpperCamelCase__ , UpperCamelCase__) and len(UpperCamelCase__) != count:
            raise ValueError(
                F'''A dict of processors was passed, but the number of processors {len(UpperCamelCase__)} does not match the'''
                F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''')

        def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Optional[int]):
            if hasattr(UpperCamelCase__ , """set_processor"""):
                if not isinstance(UpperCamelCase__ , UpperCamelCase__):
                    module.set_processor(UpperCamelCase__)
                else:
                    # dict form: pop this module's entry by its dotted path
                    module.set_processor(processor.pop(F'''{name}.processor'''))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)

        for name, module in self.named_children():
            fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)

    def __magic_name__ ( self : Dict):
        '''Reset every attention module to the default AttnProcessor.'''
        self.set_attn_processor(AttnProcessor())

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[torch.Tensor, float, int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.BoolTensor] = None , UpperCamelCase__ : bool = True , ):
        '''Forward pass. Parameter names are mangled; presumably
        (hidden_states, timestep, proj_embedding, encoder_hidden_states,
        attention_mask, return_dict) — confirm against the original.
        Returns the predicted image embedding, wrapped in an output
        dataclass unless return_dict is False.'''
        snake_case__ = hidden_states.shape[0]
        snake_case__ = timestep
        if not torch.is_tensor(UpperCamelCase__):
            snake_case__ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device)
        elif torch.is_tensor(UpperCamelCase__) and len(timesteps.shape) == 0:
            snake_case__ = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        snake_case__ = timesteps * torch.ones(UpperCamelCase__ , dtype=timesteps.dtype , device=timesteps.device)
        snake_case__ = self.time_proj(UpperCamelCase__)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        snake_case__ = timesteps_projected.to(dtype=self.dtype)
        snake_case__ = self.time_embedding(UpperCamelCase__)
        if self.embedding_proj_norm is not None:
            snake_case__ = self.embedding_proj_norm(UpperCamelCase__)
        snake_case__ = self.embedding_proj(UpperCamelCase__)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            snake_case__ = self.encoder_hidden_states_proj(UpperCamelCase__)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""")
        snake_case__ = self.proj_in(UpperCamelCase__)
        snake_case__ = self.positional_embedding.to(hidden_states.dtype)
        snake_case__ = []
        snake_case__ = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(UpperCamelCase__)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        # promote 2-D inputs to a length-1 token sequence
        if len(proj_embeddings.shape) == 2:
            snake_case__ = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            snake_case__ = hidden_states[:, None, :]
        snake_case__ = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            snake_case__ = self.prd_embedding.to(hidden_states.dtype).expand(UpperCamelCase__ , -1 , -1)
            additional_embeds.append(UpperCamelCase__)
        snake_case__ = torch.cat(
            UpperCamelCase__ , dim=1 , )
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        snake_case__ = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            snake_case__ = F.pad(
                UpperCamelCase__ , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        snake_case__ = hidden_states + positional_embeddings
        if attention_mask is not None:
            # convert the boolean mask into an additive -10000 bias, pad it to
            # cover the extra tokens, then combine with the causal mask
            snake_case__ = (1 - attention_mask.to(hidden_states.dtype)) * -1_00_00.0
            snake_case__ = F.pad(UpperCamelCase__ , (0, self.additional_embeddings) , value=0.0)
            snake_case__ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            snake_case__ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0)
        if self.norm_in is not None:
            snake_case__ = self.norm_in(UpperCamelCase__)
        for block in self.transformer_blocks:
            snake_case__ = block(UpperCamelCase__ , attention_mask=UpperCamelCase__)
        snake_case__ = self.norm_out(UpperCamelCase__)
        if self.prd_embedding is not None:
            # with a prd token, the prediction is read from the final token
            snake_case__ = hidden_states[:, -1]
        else:
            snake_case__ = hidden_states[:, additional_embeddings_len:]
        snake_case__ = self.proj_to_clip_embeddings(UpperCamelCase__)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase__)

    def __magic_name__ ( self : Any , UpperCamelCase__ : Any):
        '''Un-normalize prior latents back into CLIP embedding space using the
        stored CLIP std/mean parameters.'''
        snake_case__ = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 654 | 1 |
import random
def _partition(data: list, pivot):
    """Three-way partition of `data` around `pivot`.

    Renamed from a mangled placeholder to match the `_partition(...)` call
    site in `quick_select` below. The original declared both parameters as
    ``a`` (a SyntaxError) and appended the undefined name ``a`` instead of
    the current element.

    :return: ``(less, equal, greater)`` lists, preserving input order.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    """Return the element that would sit at `index` if `items` were sorted
    (0-based), using randomized quickselect; `index = len(items) // 2` gives
    the median.

    Renamed from a mangled placeholder to match the recursive
    `quick_select(...)` calls in the body; the clobbered locals are restored.

    :return: the selected element, or None for an out-of-range index.
    """
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
| 654 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# Checkpoints exercised by the tokenizer tests below. The test class reads
# these via the names TOKENIZER_CHECKPOINTS / TINY_MODEL_CHECKPOINT, which the
# identifier mangling collapsed into two shadowing `a__` bindings; expose both
# the mangled binding (for backward compatibility) and the names actually used.
a__ = ["""gpt2"""]
TOKENIZER_CHECKPOINTS = a__
a__ = """gpt2"""
TINY_MODEL_CHECKPOINT = a__
if is_tf_available():

    class _lowerCAmelCase ( tf.Module ):
        """tf.Module pairing a tokenizer with a from-config GPT-2 LM head model,
        exposing a string -> logits serving function for SavedModel export.

        NOTE(review): the test class below references this as ``ModelToSave``;
        the mangled class name breaks that reference. The ``__init__``
        assignments also bind to the throwaway ``snake_case__`` rather than
        ``self.tokenizer`` / ``self.model`` (which the serving method reads),
        and ``tokenizer`` is read undefined — confirm against the original
        source.
        """

        def __init__( self : List[Any] , UpperCamelCase__ : int):
            '''Store the tokenizer and build an (untrained) TFGPT2LMHeadModel
            from the given checkpoint's config.'''
            super().__init__()
            snake_case__ = tokenizer
            snake_case__ = AutoConfig.from_pretrained(UpperCamelCase__)
            snake_case__ = TFGPTaLMHeadModel.from_config(UpperCamelCase__)

        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text"""),))
        def __magic_name__ ( self : Tuple , UpperCamelCase__ : int):
            '''Serving function: tokenize a batch of strings, build a dense
            input-id tensor and attention mask, and return the LM logits.'''
            snake_case__ = self.tokenizer(UpperCamelCase__)
            snake_case__ = tokenized["""input_ids"""].to_tensor()
            # Mask is 1 wherever a real (non-padding) token id is present.
            snake_case__ = tf.cast(input_ids_dense > 0 , tf.intaa)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            snake_case__ = self.model(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__)["""logits"""]
            return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
    """Equivalence and export tests for the in-graph TFGPT2Tokenizer: it must
    match the reference Python GPT2Tokenizer, agree with itself under
    tf.function compilation, survive SavedModel save/load and
    get_config/from_config round-trips, and honour ``max_length``.

    NOTE(review): throughout this class results are bound to the single
    mangled name ``snake_case__`` and several calls pass the undefined name
    ``UpperCamelCase__``, while attributes such as ``self.tokenizers`` /
    ``self.test_sentences`` are read but never visibly assigned. This looks
    like automated-renaming damage — confirm against the original source
    before relying on these tests.
    """

    def __magic_name__ ( self : List[Any]):
        '''setUp: build paired (reference, TF) tokenizers plus a set of
        edge-case sentences (control chars, CJK, accents, rare glyphs).'''
        super().setUp()
        snake_case__ = [GPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        snake_case__ = [TFGPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        snake_case__ = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        snake_case__ = list(zip(self.test_sentences , self.test_sentences[::-1]))

    def __magic_name__ ( self : Optional[int]):
        '''TF tokenizer output must match the reference tokenizer key-by-key.'''
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                snake_case__ = tokenizer([test_inputs] , return_tensors="""tf""")
                snake_case__ = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    snake_case__ = python_outputs[key].numpy()
                    snake_case__ = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase__ , tf.intaa) == tf_outputs_values))

    @slow
    def __magic_name__ ( self : Optional[int]):
        '''tf.function-compiled tokenizer must agree with eager execution.'''
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = tf.function(UpperCamelCase__)
            for test_inputs in self.test_sentences:
                snake_case__ = tf.constant(UpperCamelCase__)
                snake_case__ = compiled_tokenizer(UpperCamelCase__)
                snake_case__ = tf_tokenizer(UpperCamelCase__)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def __magic_name__ ( self : Optional[Any]):
        '''Tokenizer + model must round-trip through SavedModel export.'''
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = ModelToSave(tokenizer=UpperCamelCase__)
            snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
            snake_case__ = model.serving(UpperCamelCase__) # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                snake_case__ = Path(UpperCamelCase__) / """saved.model"""
                tf.saved_model.save(UpperCamelCase__ , UpperCamelCase__ , signatures={"""serving_default""": model.serving})
                snake_case__ = tf.saved_model.load(UpperCamelCase__)
                snake_case__ = loaded_model.signatures["""serving_default"""](UpperCamelCase__)["""output_0"""]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def __magic_name__ ( self : Tuple):
        '''get_config/from_config round-trip must preserve tokenizer output.'''
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
            snake_case__ = tf_tokenizer(UpperCamelCase__) # Build model with some sample inputs
            snake_case__ = tf_tokenizer.get_config()
            snake_case__ = TFGPTaTokenizer.from_config(UpperCamelCase__)
            snake_case__ = model_from_config(UpperCamelCase__)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def __magic_name__ ( self : Dict):
        '''Output sequence length must be capped at the requested max_length.'''
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            snake_case__ = 1_2_3_1_2_3
            for max_length in [3, 5, 1_0_2_4]:
                snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
                snake_case__ = tf_tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__)
                snake_case__ = out["""input_ids"""].numpy().shape[1]
                assert out_length == max_length
| 654 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
# Module-level logger (standard Hugging Face pattern); unused in the visible
# chunk — presumably used by code elsewhere in the original module.
a__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowercase_ ):
    """Image processor: optional resize -> center-crop -> rescale -> normalize
    pipeline over a batch of images, returned as a BatchFeature with key
    ``pixel_values``.

    NOTE(review): ``__init__`` binds every setting to the mangled local name
    ``snake_case__`` rather than ``self.<attr>``, several signatures repeat
    the parameter name ``UpperCamelCase__`` (a SyntaxError in Python), and
    method bodies read names (``size``, ``images``...) that are never visibly
    bound — automated renaming appears to have broken this class; confirm
    against the original source.
    """

    # Model-input names produced by this processor.
    _lowercase : List[str] = ['''pixel_values''']

    def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PIL.Image.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : Union[int, float] = 1 / 2_5_5 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : List[Any] , ):
        '''Store default preprocessing settings: resize size (256x256) and
        crop size (224x224) defaults, rescale factor, and normalization
        mean/std (ImageNet standard defaults).'''
        super().__init__(**UpperCamelCase__)
        snake_case__ = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
        snake_case__ = get_size_dict(UpperCamelCase__)
        snake_case__ = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
        snake_case__ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""")
        snake_case__ = do_resize
        snake_case__ = size
        snake_case__ = resample
        snake_case__ = do_center_crop
        snake_case__ = crop_size
        snake_case__ = do_rescale
        snake_case__ = rescale_factor
        snake_case__ = do_normalize
        snake_case__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        snake_case__ = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PIL.Image.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ):
        '''Resize an image to the given {"height", "width"} size dict.'''
        snake_case__ = get_size_dict(UpperCamelCase__)
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''')
        return resize(
            UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__)

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
        '''Center-crop an image to the given {"height", "width"} size dict.'''
        snake_case__ = get_size_dict(UpperCamelCase__)
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''')
        return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__)

    def __magic_name__ ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
        '''Rescale pixel values by the given factor (default elsewhere: 1/255).'''
        return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__)

    def __magic_name__ ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : int , ):
        '''Normalize an image with the given per-channel mean and std.'''
        return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__)

    def __magic_name__ ( self : Tuple , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Any , ):
        '''Run the configured pipeline over one or more images (per-call
        overrides fall back to the instance defaults) and return a
        BatchFeature with key "pixel_values".'''
        snake_case__ = do_resize if do_resize is not None else self.do_resize
        snake_case__ = resample if resample is not None else self.resample
        snake_case__ = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case__ = do_rescale if do_rescale is not None else self.do_rescale
        snake_case__ = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case__ = do_normalize if do_normalize is not None else self.do_normalize
        snake_case__ = image_mean if image_mean is not None else self.image_mean
        snake_case__ = image_std if image_std is not None else self.image_std
        snake_case__ = size if size is not None else self.size
        snake_case__ = get_size_dict(UpperCamelCase__)
        snake_case__ = crop_size if crop_size is not None else self.crop_size
        snake_case__ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""")
        snake_case__ = make_list_of_images(UpperCamelCase__)
        if not valid_images(UpperCamelCase__):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        # NOTE(review): Python precedence makes this `(do_resize and size is None)
        # or resample is None` — probably intended as
        # `do_resize and (size is None or resample is None)`; confirm upstream.
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""")
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
        # All transformations expect numpy arrays.
        snake_case__ = [to_numpy_array(UpperCamelCase__) for image in images]
        if do_resize:
            snake_case__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__) for image in images]
        if do_center_crop:
            snake_case__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__) for image in images]
        if do_rescale:
            snake_case__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__) for image in images]
        if do_normalize:
            snake_case__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__) for image in images]
        snake_case__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__) for image in images]
        snake_case__ = {"""pixel_values""": images}
        return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__)
| 654 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( lowercase_ ):
    """IPNDMScheduler test suite: config save/load round-trips, step-output
    equivalence, output-shape checks and a full-loop numerical regression.

    NOTE(review): throughout this class results are assigned to the single
    mangled name ``snake_case__`` and then read back under their original
    names (``config``, ``kwargs``, ``scheduler``, ``output``...), and many
    calls pass the undefined ``UpperCamelCase__``. The bindings look broken
    by automated renaming — confirm against the original source.
    """

    # Scheduler classes under test and default forward kwargs.
    _lowercase : int = (IPNDMScheduler,)
    _lowercase : int = (('''num_inference_steps''', 50),)

    def __magic_name__ ( self : Any , **UpperCamelCase__ : Tuple):
        '''Return the default scheduler config, updated with any overrides.'''
        snake_case__ = {"""num_train_timesteps""": 1_0_0_0}
        config.update(**UpperCamelCase__)
        return config

    def __magic_name__ ( self : int , UpperCamelCase__ : Dict=0 , **UpperCamelCase__ : int):
        '''check_over_configs: step outputs must be identical after a
        save_config / from_pretrained round-trip.'''
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
            snake_case__ = scheduler_class(**UpperCamelCase__)
            scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residuals
            snake_case__ = dummy_past_residuals[:]
            if time_step is None:
                snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__)
                snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
                new_scheduler.set_timesteps(UpperCamelCase__)
                # copy over dummy past residuals
                snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def __magic_name__ ( self : List[Any]):
        '''Intentionally a no-op for this scheduler.'''
        pass

    def __magic_name__ ( self : Tuple , UpperCamelCase__ : Union[str, Any]=0 , **UpperCamelCase__ : Tuple):
        '''check_over_forward: same round-trip equivalence check, but built
        from the default (non-overridden) scheduler config.'''
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**UpperCamelCase__)
            scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residuals (must be after setting timesteps)
            snake_case__ = dummy_past_residuals[:]
            if time_step is None:
                snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__)
                snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(UpperCamelCase__)
                # copy over dummy past residual (must be after setting timesteps)
                snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def __magic_name__ ( self : Union[str, Any] , **UpperCamelCase__ : Dict):
        '''full_loop: run the 10-step denoising loop twice and return the
        final sample.'''
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
        snake_case__ = scheduler_class(**UpperCamelCase__)
        snake_case__ = 1_0
        snake_case__ = self.dummy_model()
        snake_case__ = self.dummy_sample_deter
        scheduler.set_timesteps(UpperCamelCase__)
        for i, t in enumerate(scheduler.timesteps):
            snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
        return sample

    def __magic_name__ ( self : Optional[int]):
        '''Consecutive step() calls must preserve the sample shape.'''
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**UpperCamelCase__)
            snake_case__ = self.dummy_sample
            snake_case__ = 0.1 * sample
            if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps"""):
                scheduler.set_timesteps(UpperCamelCase__)
            elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps"""):
                snake_case__ = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.timesteps[5]
            snake_case__ = scheduler.timesteps[6]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            self.assertEqual(output_a.shape , sample.shape)
            self.assertEqual(output_a.shape , output_a.shape)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            self.assertEqual(output_a.shape , sample.shape)
            self.assertEqual(output_a.shape , output_a.shape)

    def __magic_name__ ( self : Union[str, Any]):
        '''Vary num_train_timesteps through check_over_configs.'''
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__)

    def __magic_name__ ( self : Dict):
        '''Vary num_inference_steps through check_over_forward.'''
        for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0]):
            self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__)

    def __magic_name__ ( self : List[str]):
        '''Full-loop result mean must land near the known reference value.'''
        snake_case__ = self.full_loop()
        snake_case__ = torch.mean(torch.abs(UpperCamelCase__))
        assert abs(result_mean.item() - 2_5_4_0_5_2_9) < 1_0
| 654 | 1 |
import unittest
from transformers import DonutProcessor
# Hub id of the pretrained Donut processor exercised by the test class below.
# NOTE(review): the test reads the undefined ``UpperCamelCase__`` instead of
# this constant — looks like renaming damage; confirm against the original.
a__ = """naver-clova-ix/donut-base"""
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests DonutProcessor's token-sequence -> JSON conversion: a Donut
    tag-style token string must be reconstructed into its nested-dict form.

    NOTE(review): the first method (presumably setUp) binds the processor to
    the mangled ``snake_case__`` instead of ``self.processor`` (which the
    test reads), and ``from_pretrained`` receives the undefined
    ``UpperCamelCase__`` instead of the module-level checkpoint constant —
    confirm against the original source.
    """

    def __magic_name__ ( self : List[Any]):
        '''setUp: load the pretrained Donut processor.'''
        snake_case__ = DonutProcessor.from_pretrained(UpperCamelCase__)

    def __magic_name__ ( self : Tuple):
        '''token2json must reconstruct the expected nested dictionary,
        including the repeated <sep/>-joined nickname entries.'''
        snake_case__ = {
            """name""": """John Doe""",
            """age""": """99""",
            """city""": """Atlanta""",
            """state""": """GA""",
            """zip""": """30301""",
            """phone""": """123-4567""",
            """nicknames""": [{"""nickname""": """Johnny"""}, {"""nickname""": """JD"""}],
        }
        snake_case__ = (
            """<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"""
            """<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"""
            """<s_nicknames><s_nickname>Johnny</s_nickname>"""
            """<sep/><s_nickname>JD</s_nickname></s_nicknames>"""
        )
        snake_case__ = self.processor.tokenajson(UpperCamelCase__)
        self.assertDictEqual(UpperCamelCase__ , UpperCamelCase__)
| 654 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowerCAmelCase ( lowercase_ ):
    """Agent tool wrapping CLIPSeg: given an image and a text label, produce a
    binary segmentation mask as a PIL image.

    NOTE(review): identifier mangling broke several bindings here — the
    encode method reads the undefined ``label``/``image`` instead of its
    parameters, and forward/decode assign to the throwaway ``snake_case__``
    while returning the undefined ``logits``/``array``. Confirm against the
    original source.
    """

    # Natural-language tool description shown to the agent.
    _lowercase : Optional[Any] = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    # Checkpoint, tool name, model class, and declared input/output modalities.
    _lowercase : Dict = '''CIDAS/clipseg-rd64-refined'''
    _lowercase : List[Any] = '''image_segmenter'''
    _lowercase : Tuple = CLIPSegForImageSegmentation
    _lowercase : str = ['''image''', '''text''']
    _lowercase : Dict = ['''image''']

    def __init__( self : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any]):
        '''Require the vision backend (PIL) before initializing the tool.'''
        requires_backends(self , ["""vision"""])
        super().__init__(*UpperCamelCase__ , **UpperCamelCase__)

    def __magic_name__ ( self : str , UpperCamelCase__ : "Image" , UpperCamelCase__ : str):
        '''encode: build CLIPSeg tensor inputs from one image and one label.'''
        return self.pre_processor(text=[label] , images=[image] , padding=UpperCamelCase__ , return_tensors="""pt""")

    def __magic_name__ ( self : Any , UpperCamelCase__ : Optional[Any]):
        '''forward: run CLIPSeg without gradients and return the raw logits.'''
        with torch.no_grad():
            snake_case__ = self.model(**UpperCamelCase__).logits
        return logits

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any]):
        '''decode: threshold the logits into a 0/255 uint8 PIL mask image.
        NOTE(review): the two bare 0/1 assignments presumably wrote the
        thresholded values into the array before scaling — confirm upstream.'''
        snake_case__ = outputs.cpu().detach().numpy()
        snake_case__ = 0
        snake_case__ = 1
        return Image.fromarray((array * 2_5_5).astype(np.uinta))
| 654 | 1 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( lowercase_ ):
    """IPNDMScheduler test suite (duplicate of the copy earlier in this file):
    config save/load round-trips, step-output equivalence, output-shape
    checks and a full-loop numerical regression.

    NOTE(review): throughout this class results are assigned to the single
    mangled name ``snake_case__`` and then read back under their original
    names (``config``, ``kwargs``, ``scheduler``, ``output``...), and many
    calls pass the undefined ``UpperCamelCase__``. The bindings look broken
    by automated renaming — confirm against the original source.
    """

    # Scheduler classes under test and default forward kwargs.
    _lowercase : int = (IPNDMScheduler,)
    _lowercase : int = (('''num_inference_steps''', 50),)

    def __magic_name__ ( self : Any , **UpperCamelCase__ : Tuple):
        '''Return the default scheduler config, updated with any overrides.'''
        snake_case__ = {"""num_train_timesteps""": 1_0_0_0}
        config.update(**UpperCamelCase__)
        return config

    def __magic_name__ ( self : int , UpperCamelCase__ : Dict=0 , **UpperCamelCase__ : int):
        '''check_over_configs: step outputs must be identical after a
        save_config / from_pretrained round-trip.'''
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
            snake_case__ = scheduler_class(**UpperCamelCase__)
            scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residuals
            snake_case__ = dummy_past_residuals[:]
            if time_step is None:
                snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__)
                snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
                new_scheduler.set_timesteps(UpperCamelCase__)
                # copy over dummy past residuals
                snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def __magic_name__ ( self : List[Any]):
        '''Intentionally a no-op for this scheduler.'''
        pass

    def __magic_name__ ( self : Tuple , UpperCamelCase__ : Union[str, Any]=0 , **UpperCamelCase__ : Tuple):
        '''check_over_forward: same round-trip equivalence check, but built
        from the default (non-overridden) scheduler config.'''
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**UpperCamelCase__)
            scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residuals (must be after setting timesteps)
            snake_case__ = dummy_past_residuals[:]
            if time_step is None:
                snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__)
                snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(UpperCamelCase__)
                # copy over dummy past residual (must be after setting timesteps)
                snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def __magic_name__ ( self : Union[str, Any] , **UpperCamelCase__ : Dict):
        '''full_loop: run the 10-step denoising loop twice and return the
        final sample.'''
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
        snake_case__ = scheduler_class(**UpperCamelCase__)
        snake_case__ = 1_0
        snake_case__ = self.dummy_model()
        snake_case__ = self.dummy_sample_deter
        scheduler.set_timesteps(UpperCamelCase__)
        for i, t in enumerate(scheduler.timesteps):
            snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
        return sample

    def __magic_name__ ( self : Optional[int]):
        '''Consecutive step() calls must preserve the sample shape.'''
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**UpperCamelCase__)
            snake_case__ = self.dummy_sample
            snake_case__ = 0.1 * sample
            if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps"""):
                scheduler.set_timesteps(UpperCamelCase__)
            elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps"""):
                snake_case__ = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.timesteps[5]
            snake_case__ = scheduler.timesteps[6]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            self.assertEqual(output_a.shape , sample.shape)
            self.assertEqual(output_a.shape , output_a.shape)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            self.assertEqual(output_a.shape , sample.shape)
            self.assertEqual(output_a.shape , output_a.shape)

    def __magic_name__ ( self : Union[str, Any]):
        '''Vary num_train_timesteps through check_over_configs.'''
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__)

    def __magic_name__ ( self : Dict):
        '''Vary num_inference_steps through check_over_forward.'''
        for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0]):
            self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__)

    def __magic_name__ ( self : List[str]):
        '''Full-loop result mean must land near the known reference value.'''
        snake_case__ = self.full_loop()
        snake_case__ = torch.mean(torch.abs(UpperCamelCase__))
        assert abs(result_mean.item() - 2_5_4_0_5_2_9) < 1_0
| 654 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
    """Config holder for the LayoutLMv3 image-processor tests: stores batch /
    image sizing settings and the OCR flag, and exposes them as the kwargs
    dict used to construct the image processor.

    NOTE(review): the ``__init__`` parameters are all mangled to the single
    repeated name ``UpperCamelCase__`` (a SyntaxError in Python), and every
    assignment binds the throwaway ``snake_case__`` rather than ``self.<attr>``
    — yet the second method reads ``self.do_resize`` etc. Confirm against the
    original source.
    """

    def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Dict=1_8 , UpperCamelCase__ : Any=3_0 , UpperCamelCase__ : List[Any]=4_0_0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=None , UpperCamelCase__ : Optional[int]=True , ):
        '''Record batch/image sizing parameters and the apply_ocr flag
        (size defaults to 18x18 when not given).'''
        snake_case__ = size if size is not None else {"""height""": 1_8, """width""": 1_8}
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = num_channels
        snake_case__ = image_size
        snake_case__ = min_resolution
        snake_case__ = max_resolution
        snake_case__ = do_resize
        snake_case__ = size
        snake_case__ = apply_ocr

    def __magic_name__ ( self : Optional[Any]):
        '''Return the kwargs dict used to construct the image processor.'''
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( lowercase_ , unittest.TestCase ):
    """Unit tests for the LayoutLMv3 image processor: configuration round-trips,
    batched/unbatched ``__call__`` over PIL / numpy / torch inputs, and a
    Tesseract-OCR integration test."""

    # NOTE(review): throughout this suite, locals were mechanically renamed to
    # `snake_case__` and their later reads to `UpperCamelCase__` (or to the
    # original variable name).  As written, every `UpperCamelCase__` /
    # `image_processor` / `encoding` / `image_inputs` / `ds` read is undefined;
    # the intended value is the one assigned on the closest preceding
    # `snake_case__ = ...` line.  Confirm against the original test file.

    # Suite is skipped entirely when pytesseract is unavailable.
    _lowercase : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def __magic_name__ ( self : Optional[int]):
        """Build the shared fixture holding the test configuration."""
        snake_case__ = LayoutLMvaImageProcessingTester(self)

    @property
    def __magic_name__ ( self : Tuple):
        """Kwargs dict used to instantiate the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def __magic_name__ ( self : List[Any]):
        """The processor exposes the expected configuration attributes."""
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(UpperCamelCase__ , """do_resize"""))
        self.assertTrue(hasattr(UpperCamelCase__ , """size"""))
        self.assertTrue(hasattr(UpperCamelCase__ , """apply_ocr"""))

    def __magic_name__ ( self : Optional[int]):
        """`from_dict` honors the dict and accepts size overrides."""
        snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8})
        # An int `size` override is normalized into a height/width dict.
        snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
        self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2})

    def __magic_name__ ( self : List[str]):
        """Intentionally empty placeholder (overrides a mixin test)."""
        pass

    def __magic_name__ ( self : List[str]):
        """__call__ on PIL images: output shape for single and batched input,
        plus OCR words/boxes on the unbatched encoding."""
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , Image.Image)
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""")
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        self.assertIsInstance(encoding.words , UpperCamelCase__)
        self.assertIsInstance(encoding.boxes , UpperCamelCase__)
        # Test batched
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    def __magic_name__ ( self : List[Any]):
        """__call__ on numpy arrays: single and batched output shapes."""
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , np.ndarray)
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    def __magic_name__ ( self : Dict):
        """__call__ on torch tensors: single and batched output shapes."""
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , torch.Tensor)
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    def __magic_name__ ( self : Any):
        """Integration test: run real OCR on a DocVQA fixture and compare the
        extracted words/boxes against a Tesseract 4.1.1 reference; then check
        that apply_ocr=False still yields the resized pixel values."""
        snake_case__ = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        snake_case__ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""")
        snake_case__ = Image.open(ds[0]["""file"""]).convert("""RGB""")
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""")
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
        self.assertEqual(len(encoding.words) , len(encoding.boxes))
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        snake_case__ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", 
        """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        snake_case__ = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], 
        [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 
        8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , UpperCamelCase__)
        self.assertListEqual(encoding.boxes , UpperCamelCase__)
        # with apply_OCR = False
        snake_case__ = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__)
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""")
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
| 654 | 1 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for `transformers.utils.backbone_utils`: aligning out_features with
    out_indices, validating the two against stage names, and the BackboneMixin
    property behavior."""

    # NOTE(review): locals in this suite were mechanically renamed to
    # `snake_case__` and their reads to `UpperCamelCase__`; every
    # `UpperCamelCase__` reference below is undefined as written and originally
    # named a value assigned (or `None`) nearby.  Confirm against the original.

    def __magic_name__ ( self : Optional[Any]):
        """get_aligned_output_features_output_indices fills whichever of
        out_features / out_indices is missing so the two stay aligned."""
        snake_case__ = ["""a""", """b""", """c"""]
        # Defaults to last layer if both are None
        snake_case__ , snake_case__ = get_aligned_output_features_output_indices(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        self.assertEqual(UpperCamelCase__ , ["""c"""])
        self.assertEqual(UpperCamelCase__ , [2])
        # Out indices set to match out features
        snake_case__ , snake_case__ = get_aligned_output_features_output_indices(["""a""", """c"""] , UpperCamelCase__ , UpperCamelCase__)
        self.assertEqual(UpperCamelCase__ , ["""a""", """c"""])
        self.assertEqual(UpperCamelCase__ , [0, 2])
        # Out features set to match out indices
        snake_case__ , snake_case__ = get_aligned_output_features_output_indices(UpperCamelCase__ , [0, 2] , UpperCamelCase__)
        self.assertEqual(UpperCamelCase__ , ["""a""", """c"""])
        self.assertEqual(UpperCamelCase__ , [0, 2])
        # Out features selected from negative indices
        snake_case__ , snake_case__ = get_aligned_output_features_output_indices(UpperCamelCase__ , [-3, -1] , UpperCamelCase__)
        self.assertEqual(UpperCamelCase__ , ["""a""", """c"""])
        self.assertEqual(UpperCamelCase__ , [-3, -1])

    def __magic_name__ ( self : Any):
        """verify_out_features_out_indices rejects each malformed or
        inconsistent combination and accepts a valid one."""
        with self.assertRaises(UpperCamelCase__):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , UpperCamelCase__)
        # Out features must be a list
        with self.assertRaises(UpperCamelCase__):
            verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""])
        # Out features must be a subset of stage names
        with self.assertRaises(UpperCamelCase__):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""])
        # Out indices must be a list or tuple
        with self.assertRaises(UpperCamelCase__):
            verify_out_features_out_indices(UpperCamelCase__ , 0 , ["""a""", """b"""])
        # Out indices must be a subset of stage names
        with self.assertRaises(UpperCamelCase__):
            verify_out_features_out_indices(UpperCamelCase__ , (0, 1) , ["""a"""])
        # Out features and out indices must be the same length
        with self.assertRaises(UpperCamelCase__):
            verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""])
        # Out features should match out indices
        with self.assertRaises(UpperCamelCase__):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""])
        # Out features and out indices should be in order
        with self.assertRaises(UpperCamelCase__):
            verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""])
        # Check passes with valid inputs
        verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""])

    def __magic_name__ ( self : Optional[Any]):
        """BackboneMixin keeps out_features and out_indices in sync when either
        is reassigned."""
        snake_case__ = BackboneMixin()
        snake_case__ = ["""a""", """b""", """c"""]
        snake_case__ = ["""a""", """c"""]
        snake_case__ = [0, 2]
        # NOTE(review): the three list assignments above were presumably
        # attribute writes on the mixin (stage names / out_features /
        # out_indices) before the mechanical rename; as written they only
        # rebind a local.
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["""a""", """c"""])
        self.assertEqual(backbone.out_indices , [0, 2])
        # Check out features and indices are updated correctly
        snake_case__ = ["""a""", """b"""]
        self.assertEqual(backbone.out_features , ["""a""", """b"""])
        self.assertEqual(backbone.out_indices , [0, 1])
        snake_case__ = [-3, -1]
        self.assertEqual(backbone.out_features , ["""a""", """c"""])
        self.assertEqual(backbone.out_indices , [-3, -1])
| 654 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _lowerCAmelCase ( lowercase_ ):
    """Dataset of pre-tokenized sequences used for language-model distillation.

    Wraps token-id arrays plus their lengths and cleans the corpus on
    construction: overly long sequences are split into chunks, very short ones
    and ones dominated by unknown tokens are dropped.
    """

    # NOTE(review): locals were mechanically renamed to `snake_case__` and many
    # reads to `UpperCamelCase__`.  Several assignments below were clearly
    # attribute writes originally (e.g. `self.params`, `self.token_ids`,
    # `self.lengths` in __init__, which every other method reads), and some
    # signatures repeat a parameter name (a SyntaxError as written).  Confirm
    # against the original file before relying on this code.

    def __init__( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]):
        """Store params and data, then run the cleaning passes and log stats."""
        snake_case__ = params
        snake_case__ = np.array(UpperCamelCase__)
        snake_case__ = np.array([len(UpperCamelCase__) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__( self : Dict , UpperCamelCase__ : Any):
        """Return the (token_ids, length) pair for one sequence."""
        return (self.token_ids[index], self.lengths[index])

    def __len__( self : Union[str, Any]):
        """Number of sequences currently kept."""
        return len(self.lengths)

    def __magic_name__ ( self : str):
        """Consistency check: one stored length per sequence, each matching its
        token array."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def __magic_name__ ( self : Optional[int]):
        """Split sequences longer than the model's max input size into chunks,
        re-adding the special start/end tokens around every chunk."""
        snake_case__ = self.params.max_model_input_size
        snake_case__ = self.lengths > max_len
        logger.info(F'''Splitting {sum(UpperCamelCase__)} too long sequences.''')

        def divide_chunks(UpperCamelCase__ : str , UpperCamelCase__ : Tuple):
            # Slice the sequence into consecutive fixed-size windows.
            return [l[i : i + n] for i in range(0 , len(UpperCamelCase__) , UpperCamelCase__)]

        snake_case__ = []
        snake_case__ = []
        if self.params.mlm:
            # MLM-style models bracket sequences with [CLS] ... [SEP].
            snake_case__ , snake_case__ = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
        else:
            # CLM-style models bracket sequences with <bos> ... <eos>.
            snake_case__ , snake_case__ = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
        for seq_, len_ in zip(self.token_ids , self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                snake_case__ = []
                # max_len - 2 leaves room for the two special tokens per chunk.
                for sub_s in divide_chunks(seq_ , max_len - 2):
                    if sub_s[0] != cls_id:
                        snake_case__ = np.insert(UpperCamelCase__ , 0 , UpperCamelCase__)
                    if sub_s[-1] != sep_id:
                        snake_case__ = np.insert(UpperCamelCase__ , len(UpperCamelCase__) , UpperCamelCase__)
                    assert len(UpperCamelCase__) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(UpperCamelCase__)
                new_tok_ids.extend(UpperCamelCase__)
                new_lengths.extend([len(UpperCamelCase__) for l in sub_seqs])
        snake_case__ = np.array(UpperCamelCase__)
        snake_case__ = np.array(UpperCamelCase__)

    def __magic_name__ ( self : Any):
        """Drop sequences of 11 tokens or fewer."""
        snake_case__ = len(self)
        snake_case__ = self.lengths > 1_1
        snake_case__ = self.token_ids[indices]
        snake_case__ = self.lengths[indices]
        snake_case__ = len(self)
        logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''')

    def __magic_name__ ( self : List[str]):
        """Drop sequences whose unknown-token ratio is 50% or more (no-op when
        the tokenizer has no unk token)."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            snake_case__ = self.params.special_tok_ids["""unk_token"""]
            snake_case__ = len(self)
            # Count unk occurrences per sequence.
            snake_case__ = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
            snake_case__ = (unk_occs / self.lengths) < 0.5
            snake_case__ = self.token_ids[indices]
            snake_case__ = self.lengths[indices]
            snake_case__ = len(self)
            logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''')

    def __magic_name__ ( self : Optional[Any]):
        """Log corpus statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(F'''{len(self)} sequences''')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def __magic_name__ ( self : int , UpperCamelCase__ : Optional[int]):
        """Collate a batch of (token_ids, length) pairs: pad every sequence to
        the batch maximum and return (bs, max_len) and (bs,) tensors."""
        snake_case__ = [t[0] for t in batch]
        snake_case__ = [t[1] for t in batch]
        assert len(UpperCamelCase__) == len(UpperCamelCase__)
        # Max for paddings
        snake_case__ = max(UpperCamelCase__)
        # Pad token ids
        if self.params.mlm:
            snake_case__ = self.params.special_tok_ids["""pad_token"""]
        else:
            snake_case__ = self.params.special_tok_ids["""unk_token"""]
        snake_case__ = [list(t.astype(UpperCamelCase__)) + [pad_idx] * (max_seq_len_ - len(UpperCamelCase__)) for t in token_ids]
        assert len(tk_) == len(UpperCamelCase__)
        assert all(len(UpperCamelCase__) == max_seq_len_ for t in tk_)
        snake_case__ = torch.tensor(tk_)  # (bs, max_seq_len_)
        snake_case__ = torch.tensor(UpperCamelCase__)  # (bs)
        return tk_t, lg_t
| 654 | 1 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _UpperCAmelCase ( ):
    """Entry point for the ``diffusers-cli`` command-line tool.

    Parses the sub-command, dispatches to the registered command's ``func``
    factory, and runs the resulting service.

    Bug fix: the original body bound every intermediate to one throwaway local
    and then read undefined names (``parser``, ``a``), and the ``__main__``
    guard called an undefined ``main()``.  Names are restored from the reads in
    the original body.
    """
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        # No sub-command given: show usage and exit non-zero.
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    _UpperCAmelCase()
| 654 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def _UpperCAmelCase ( a : str ):
    """Map one key of the original YOSO checkpoint onto the transformers
    naming scheme and return the renamed key.

    Bug fix: the original body discarded every ``replace`` result into a dead
    local (so no renaming ever took effect) and read undefined names
    (``layer_num``, ``orig_key``).  This version threads the key through each
    rule; rule order and all patterns are preserved byte-for-byte.
    """
    key = a
    if "model" in key:
        key = key.replace("model.", "")
    # Layer-norm renames; ordered so the more specific patterns win first.
    for old, new in (
        ("norm1", "attention.output.LayerNorm"),
        ("norm2", "output.LayerNorm"),
        ("norm", "LayerNorm"),
    ):
        if old in key:
            key = key.replace(old, new)
    if "transformer" in key:
        # transformer_<n>.*  ->  encoder.layer.<n>.*
        layer_num = key.split(".")[0].split("_")[-1]
        key = key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    # Attention / feed-forward / MLM-head renames, in the original order
    # (e.g. "mha.attn" must be tried before the bare "mha").
    for old, new in (
        ("mha.attn", "attention.self"),
        ("mha", "attention"),
        ("W_q", "self.query"),
        ("W_k", "self.key"),
        ("W_v", "self.value"),
        ("ff1", "intermediate.dense"),
        ("ff2", "output.dense"),
        ("ff", "output.dense"),
        ("mlm.mlm_class", "cls.predictions.decoder"),
        ("mlm", "cls.predictions.transform"),
    ):
        if old in key:
            key = key.replace(old, new)
    # Everything outside the classification head lives under the "yoso." root.
    if "cls" not in key:
        key = "yoso." + key
    return key
def _UpperCAmelCase ( a : Tuple , a : Dict ):
    """Rewrite the original YOSO state dict: drop pooler / sen_class weights,
    rename the remaining keys, and add the decoder bias and position ids.

    NOTE(review): both parameters share the name ``a`` — a SyntaxError as
    written — and several assignments lost their targets in the mechanical
    rename (the ``= val`` line must originally have written the renamed key
    back into the state dict, and the last two lines must have assigned the
    ``cls.predictions.bias`` / position-id entries).  Restore from the original
    conversion script before use.
    """
    for key in orig_state_dict.copy().keys():
        snake_case__ = orig_state_dict.pop(a )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            snake_case__ = val
    snake_case__ = orig_state_dict["""cls.predictions.decoder.bias"""]
    # Position ids offset by 2 (padding convention), shape (1, max_positions).
    snake_case__ = torch.arange(a ).expand((1, -1) ) + 2
    return orig_state_dict
def _UpperCAmelCase ( a : int , a : List[Any] , a : List[Any] ):
    """Load a YOSO training checkpoint, convert its state dict to the
    transformers layout, and save the resulting model.

    NOTE(review): the three parameters all share the name ``a`` — a
    SyntaxError as written — and the locals that must have been named
    ``config`` and ``model`` were renamed away, so the reads below are
    undefined.  Restore parameter/local names (checkpoint path, config path,
    dump path) from the original conversion script before use.
    """
    snake_case__ = torch.load(a , map_location="""cpu""" )["""model_state_dict"""]
    snake_case__ = YosoConfig.from_json_file(a )
    snake_case__ = YosoForMaskedLM(a )
    snake_case__ = convert_checkpoint_helper(config.max_position_embeddings , a )
    # load_state_dict returns the missing/unexpected key report; printed for
    # manual inspection during conversion.
    print(model.load_state_dict(a ) )
    model.eval()
    model.save_pretrained(a )
    print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
    # Bug fix: the parser/args were originally bound to a mangled module-level
    # name while the code below read undefined `parser`/`args`, and the final
    # call targeted an undefined `convert_yoso_checkpoint`.  Names restored
    # from the reads; the converter in this file is the last definition named
    # `_UpperCAmelCase`, which is what the call reaches.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    _UpperCAmelCase(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 654 | 1 |
# Notebook bootstrap snippet (comments inside the string are Korean): installs
# `transformers` and `datasets`; the commented-out line switches to a source
# install.  NOTE(review): all three module-level names were mangled to `a__`,
# so the later bindings shadow the earlier ones and `INSTALL_CONTENT` below is
# undefined as written.
a__ = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# First cell injected into every auto-generated doc notebook.
a__ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
# Placeholder -> dummy-class substitutions applied when rendering doc templates.
a__ = {
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
| 654 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( lowercase_ ):
    """Read-only fsspec filesystem that exposes one compressed file as its
    uncompressed counterpart at the filesystem root."""

    # Root marker for this single-file filesystem.
    _lowercase : Optional[int] = ''''''
    _lowercase : str = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    _lowercase : str = None # compression type in fsspec. ex: "gzip"
    _lowercase : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__( self : List[Any] , UpperCamelCase__ : str = "" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , **UpperCamelCase__ : List[Any]):
        """Lazily open the compressed target with this class's codec.

        NOTE(review): the repeated ``UpperCamelCase__`` parameter name is a
        SyntaxError as written, and the ``fsspec.open(...)`` handle plus the
        derived names are bound to throwaway locals although ``self.file``,
        ``self.compressed_name``, ``self.uncompressed_name`` and
        ``self.dir_cache`` are read throughout the class — artifacts of a
        mechanical rename; confirm against the original.
        """
        super().__init__(self , **UpperCamelCase__)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        snake_case__ = fsspec.open(
            UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {}), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        snake_case__ = os.path.basename(self.file.path.split("""::""")[0])
        # Strip the compression extension to get the uncompressed display name.
        snake_case__ = (
            self.compressed_name[: self.compressed_name.rindex(""".""")]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        snake_case__ = None

    @classmethod
    def __magic_name__ ( cls : Union[str, Any] , UpperCamelCase__ : List[Any]):
        """Strip the protocol prefix and any leading slash from a path."""
        return super()._strip_protocol(UpperCamelCase__).lstrip("""/""")

    def __magic_name__ ( self : Dict):
        """Populate the single-entry directory cache on first use."""
        if self.dir_cache is None:
            snake_case__ = {**self.file.fs.info(self.file.path), """name""": self.uncompressed_name}
            snake_case__ = {f["""name"""]: f}

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : str):
        """Read and return the whole decompressed file content."""
        return self.file.open().read()

    def __magic_name__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : Optional[Any] , ):
        """Open the (only) file; only binary read mode ("rb") is supported."""
        snake_case__ = self._strip_protocol(UpperCamelCase__)
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''')
        return self.file.open()
class _lowerCAmelCase ( lowercase_ ):
    """Compressed-file filesystem for bzip2 (`bz2://`) files."""

    # protocol / fsspec compression codec / extension to strip
    _lowercase : Dict = '''bz2'''
    _lowercase : Dict = '''bz2'''
    _lowercase : Optional[int] = '''.bz2'''
class _lowerCAmelCase ( lowercase_ ):
    """Compressed-file filesystem for gzip (`gzip://`) files."""

    # protocol / fsspec compression codec / extension to strip
    _lowercase : Dict = '''gzip'''
    _lowercase : List[str] = '''gzip'''
    _lowercase : Any = '''.gz'''
class _lowerCAmelCase ( lowercase_ ):
    """Compressed-file filesystem for lz4 (`lz4://`) files."""

    # protocol / fsspec compression codec / extension to strip
    _lowercase : str = '''lz4'''
    _lowercase : List[Any] = '''lz4'''
    _lowercase : Dict = '''.lz4'''
class _lowerCAmelCase ( lowercase_ ):
    """Compressed-file filesystem for xz (`xz://`) files."""

    # protocol / fsspec compression codec / extension to strip
    _lowercase : Optional[int] = '''xz'''
    _lowercase : Union[str, Any] = '''xz'''
    _lowercase : Optional[int] = '''.xz'''
class _lowerCAmelCase ( lowercase_ ):
    """Compressed-file filesystem for zstandard (`zstd://`) files, with a
    workaround for a read-only-attribute bug in old fsspec/zstandard."""

    # protocol / fsspec compression codec / extension to strip
    _lowercase : Optional[int] = '''zstd'''
    _lowercase : Tuple = '''zstd'''
    _lowercase : Union[str, Any] = '''.zst'''

    def __init__( self : str , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , UpperCamelCase__ : int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__ : int , ):
        """Open a zstd file and wrap its ``__enter__`` so the handle survives
        the fsspec issue referenced below.

        NOTE(review): the repeated ``UpperCamelCase__`` parameter names are a
        SyntaxError as written; the first/last ``snake_case__`` assignments
        were presumably ``_enter = self.file.__enter__`` and
        ``self.file.__enter__ = fixed_enter`` before the mechanical rename,
        and the nested class was presumably named ``WrappedFile`` (which
        ``fixed_enter`` still references).  Confirm against the original.
        """
        super().__init__(
            fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        snake_case__ = self.file.__enter__

        class _lowerCAmelCase :
            """Delegating wrapper: forwards everything to the wrapped file but
            returns itself from __enter__."""

            def __init__( self : Tuple , UpperCamelCase__ : str):
                # Hold the underlying file object.
                snake_case__ = file_

            def __enter__( self : List[str]):
                """Enter the wrapped file's context but yield the wrapper."""
                self._file.__enter__()
                return self

            def __exit__( self : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any]):
                """Delegate context exit to the wrapped file."""
                self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__)

            def __iter__( self : Any):
                """Iterate the wrapped file."""
                return iter(self._file)

            def __magic_name__ ( self : List[str]):
                """Advance the wrapped file's iterator by one item."""
                return next(self._file)

            def __getattr__( self : Any , UpperCamelCase__ : int):
                """Fall back to the wrapped file for any other attribute."""
                return getattr(self._file , UpperCamelCase__)

        def fixed_enter(*UpperCamelCase__ : int , **UpperCamelCase__ : int):
            # Re-enter via the saved bound __enter__ and wrap the result.
            return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__))

        snake_case__ = fixed_enter
| 654 | 1 |
from pathlib import Path
import fire
def _UpperCAmelCase(src_path: str, dest_path: str, n: int):
    """Copy every file directly under ``src_path`` into ``dest_path``,
    truncated to its first ``n`` lines (trailing whitespace stripped).

    Bug fix: the original signature declared three parameters that all shared
    the name ``a`` (a SyntaxError) and the ``__main__`` guard dispatched an
    undefined ``minify``; parameter names are restored from how the body uses
    them, and the guard now dispatches this function.
    """
    src_dir = Path(src_path)
    dest_dir = Path(dest_path)
    # Tolerate a pre-existing output directory.
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        # Keep only the first n lines, with trailing whitespace stripped.
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_file = dest_dir.joinpath(path.name)
        print(dest_file)
        dest_file.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(_UpperCAmelCase)
| 654 |
def _UpperCAmelCase(a: int) -> bool:
    """Return True when ``a`` is 0 or an exact power of two (at most one bit set).

    Bug fix: the original body tested an undefined name ``number`` instead of
    the parameter ``a``.

    Raises:
        ValueError: if ``a`` is negative.

    NOTE(review): ``0 & -1 == 0``, so 0 reports True here, matching the
    original expression — confirm callers expect 0 to pass.
    """
    if a < 0:
        raise ValueError("number must not be negative")
    return a & (a - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 654 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
# Enable info-level logging for the conversion script.
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)

# Base URL of the public OpenAI Jukebox checkpoints.
# NOTE(review): the module-level names were all mangled to `a__`, so the
# logger, URL prefix and shard map shadow one another as written.
a__ = """https://openaipublic.azureedge.net/jukebox/models/"""
# Checkpoint shards (VQ-VAE + three priors) required for each released model.
a__ = {
    """jukebox-1b-lyrics""": [
        """5b/vqvae.pth.tar""",
        """5b/prior_level_0.pth.tar""",
        """5b/prior_level_1.pth.tar""",
        """1b_lyrics/prior_level_2.pth.tar""",
    ],
    """jukebox-5b-lyrics""": [
        """5b/vqvae.pth.tar""",
        """5b/prior_level_0.pth.tar""",
        """5b/prior_level_1.pth.tar""",
        """5b_lyrics/prior_level_2.pth.tar""",
    ],
}
def _UpperCAmelCase ( a : Optional[Any] ):
    """Translate one key of the original OpenAI Jukebox state dict into the
    transformers naming scheme and return the (possibly) renamed key.

    Bug fix: the original body read an undefined name ``key`` and discarded
    every conditional ``replace`` result into a throwaway local; this version
    threads the key through each rule.  Rule order and all patterns are
    preserved byte-for-byte.
    """
    key = a
    # Convolution renames (only for sufficiently deep keys).
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rename every key of an original Jukebox ``state_dict`` to the HF naming.

    Structural renames (encoder/decoder/conditioner blocks) are handled with
    regexes; everything else is delegated to :func:`replace_key`.  Keys whose
    renamed form is absent from ``model_state_dict`` (prefixed with
    ``key_prefix``) or whose tensor shape does not match are reported and, on
    shape mismatch, kept under their original name.

    Args:
        state_dict (dict): original checkpoint tensors.
        model_state_dict (dict): the target HF model's state dict.
        key_prefix (str): e.g. ``"vqvae"`` or ``"priors.N"``.
        mapping (dict): filled in-place with ``new_key -> original_key``.

    Returns:
        dict: the renamed state dict.
    """
    new_dict = {}
    import re  # local import kept from the original script

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the original OpenAI Jukebox weights, rename them and save an HF checkpoint.

    Args:
        model_name (str, optional): Jukebox variant, a key of ``MODEL_MAPPING``.
        pytorch_dump_folder_path (str, optional): output directory; raw files
            are cached there and the converted model is saved there.

    Returns:
        list: the converted prior state dicts (after the vqvae dict was popped).

    NOTE(review): ``MODEL_MAPPING``, ``PREFIX``, ``requests``, ``os``, ``json``,
    ``Path``, ``JukeboxConfig`` and ``JukeboxModel`` are expected to be defined
    earlier in this file / its imports — confirm against the full module.
    """
    # Download any raw checkpoint shard we do not have locally yet.
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        # First shard is the VQ-VAE, the rest are the priors (stored top-down).
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        # Priors were appended top-level-first; load them in reverse order.
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    # Command-line entry point for the Jukebox conversion script.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 654 |
class _lowerCAmelCase:
    """Fenwick (binary-indexed) tree specialised for range-maximum queries.

    ``arr`` stores the raw values; ``tree[i]`` caches the maximum of
    ``arr[get_prev(i) + 1 .. i]``.
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size   # raw values
        self.tree = [0] * size  # per-node segment maxima

    @staticmethod
    def get_next(index: int) -> int:
        # Next node whose covered range also contains ``index``.
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        # One position before the left border of the range covered by ``index``.
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set ``arr[index] = value`` and refresh every tree node covering it.

        NOTE(review): for multi-element nodes the cached maximum is only ever
        raised (``max(tree, value)``) — assumes values are non-decreasing over
        time, as is usual for this structure.
        """
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # Node covers exactly one element.
                self.tree[index] = value
            else:
                self.tree[index] = max(self.tree[index], value)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return ``max(arr[left:right])`` (``right`` exclusive, floor 0)."""
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                # Whole covered range lies inside the query: use the cache.
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 654 | 1 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
a__ = re.compile(r"""^(?P<major>\d+)""" r"""\.(?P<minor>\d+)""" r"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class _lowerCAmelCase:
    """Dataset version, roughly following semantic versioning (``"x.y.z"``).

    ``major``/``minor``/``patch`` are filled in from ``version_str`` after
    construction; comparisons accept either another version or a plain string.
    """

    version_str: str
    description: Optional[str] = None  # presumably a human-readable note — confirm against callers
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        # Parse the version string into its three integer components.
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        """The (major, minor, patch) triple."""
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        """Coerce ``other`` (str or version) for comparison; TypeError otherwise."""
        if isinstance(other, str):
            return _lowerCAmelCase(other)
        elif isinstance(other, _lowerCAmelCase):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            # Non-comparable operands are simply unequal.
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        """Build a version from a dict, ignoring keys that are not fields."""
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self):
        return self.version_str
def _str_to_version_tuple(version_str):
    """Parse a ``"x.y.z"`` version string into a tuple of three ints.

    Raises:
        ValueError: if ``version_str`` does not match ``_VERSION_REG``.
    """
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])
def _version_tuple_to_str(version_tuple):
    """Join version components back into a dotted string, e.g. (1, 2, 3) -> "1.2.3"."""
    return ".".join(str(v) for v in version_tuple)
| 654 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    """Builds small random Pegasus configs/inputs for the TF model tests."""

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a (config, inputs_dict) pair with an EOS-terminated batch."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Check that cached (past_key_values) decoding matches the full pass."""
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the full kwargs dict for a TF-Pegasus forward pass.

    Any mask that is not supplied is derived: attention masks from the padding
    token, head masks as all-ones.
    """
    if attention_mask is None:
        # Attend to every non-padding token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Always attend to the first decoder token (decoder_start_token may be pad).
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model tests for Pegasus."""

    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    """Slow end-to-end generation check against the pretrained pegasus-xsum."""

    src_text = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    expected_text = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = '''google/pegasus-xsum'''

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        # NOTE(review): name kept as imported at the top of this file.
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="""tf""")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 654 | 1 |
def _UpperCAmelCase(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound in a fluid: sqrt(bulk_modulus / density).

    Args:
        density: fluid density (must be > 0).
        bulk_modulus: fluid bulk modulus (must be > 0).

    Raises:
        ValueError: if either argument is non-positive.
    """
    if density <= 0:
        raise ValueError("""Impossible fluid density""")
    if bulk_modulus <= 0:
        raise ValueError("""Impossible bulk modulus""")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
# Module logger and tokenizer vocabulary constants (names as referenced by the
# tokenizer class below: VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_LYRIC_TOKENS_SIZES, logger).
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    """artists_file""": """artists.json""",
    """lyrics_file""": """lyrics.json""",
    """genres_file""": """genres.json""",
}

PRETRAINED_VOCAB_FILES_MAP = {
    """artists_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
    },
    """genres_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
    },
    """lyrics_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
    },
}

# Maximum number of lyric tokens per checkpoint.
PRETRAINED_LYRIC_TOKENS_SIZES = {
    """jukebox""": 5_1_2,
}
class _lowerCAmelCase ( lowercase_ ):
    """Jukebox tokenizer: encodes an (artist, genres, lyrics) triple into three
    token streams, one per prior level, using three JSON vocabularies
    (artists, genres, character-level lyrics).

    NOTE(review): identifiers in this block appear machine-mangled — the base
    class ``lowercase_`` is undefined, many assignment targets are the
    placeholder ``snake_case__`` where attribute assignments (e.g.
    ``self.version = version``) were clearly intended, and several method
    bodies read names (``unk_token``, ``list_artists`` ...) that the mangled
    signatures no longer bind.  The code cannot run as-is; the docstrings
    below describe the apparent intent and should be re-verified once the
    identifiers are restored.
    """
    # Class-level tokenizer configuration (targets mangled; the values map to
    # vocab file names, pretrained vocab URLs, max lyric-token sizes and the
    # model input names, in that order).
    _lowercase : str = VOCAB_FILES_NAMES
    _lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : str = PRETRAINED_LYRIC_TOKENS_SIZES
    _lowercase : Any = ['''input_ids''', '''attention_mask''']

    def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=["v3", "v2", "v2"] , UpperCamelCase__ : List[str]=5_1_2 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : List[Any]="<|endoftext|>" , **UpperCamelCase__ : List[Any] , ):
        '''Load the artists/genres/lyrics vocabularies, build the out-of-vocab
        regex and the three reverse (id -> token) lookup tables.'''
        snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else unk_token
        super().__init__(
            unk_token=UpperCamelCase__ , n_genres=UpperCamelCase__ , version=UpperCamelCase__ , max_n_lyric_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
        snake_case__ = version
        snake_case__ = max_n_lyric_tokens
        snake_case__ = n_genres
        # Three vocab files: artists, genres, lyrics (character level).
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        snake_case__ = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 7_9:
            snake_case__ = oov.replace(R"""\-'""" , R"""\-+'""")
        snake_case__ = regex.compile(UpperCamelCase__)
        snake_case__ = {v: k for k, v in self.artists_encoder.items()}
        snake_case__ = {v: k for k, v in self.genres_encoder.items()}
        snake_case__ = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def __magic_name__ ( self : List[str]):
        '''Total vocabulary size: artists + genres + lyric characters.'''
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def __magic_name__ ( self : Union[str, Any]):
        '''Return the merged vocabulary.

        NOTE(review): ``dict(a, b, c)`` is not a valid call signature — the
        intent was presumably to merge the three encoder dicts; confirm
        against the upstream implementation.'''
        return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder)

    def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int):
        '''Convert artist names, genre lists and lyric characters to ids.

        Unknown tokens map to id 0; each genre list is padded with -1 up to
        ``n_genres``.  Only the first lyric stream is character-encoded.'''
        snake_case__ = [self.artists_encoder.get(UpperCamelCase__ , 0) for artist in list_artists]
        for genres in range(len(UpperCamelCase__)):
            snake_case__ = [self.genres_encoder.get(UpperCamelCase__ , 0) for genre in list_genres[genres]]
            snake_case__ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        snake_case__ = [[self.lyrics_encoder.get(UpperCamelCase__ , 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Optional[int]):
        '''Tokenize lyrics into single characters (the lyric vocab is character level).'''
        return list(UpperCamelCase__)

    def __magic_name__ ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , **UpperCamelCase__ : List[str]):
        '''Normalise artist/genre/lyrics, then character-tokenize the lyrics.'''
        snake_case__ , snake_case__ , snake_case__ = self.prepare_for_tokenization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = self._tokenize(UpperCamelCase__)
        return artist, genre, lyrics

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool = False):
        '''Per-prior normalisation of artists/genres (v3: lowercase; v2:
        ``_normalize`` + ".v2" suffix) and cleaning of the lyrics via the
        out-of-vocab regex.'''
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                snake_case__ = artists[idx].lower()
                snake_case__ = [genres[idx].lower()]
            else:
                snake_case__ = self._normalize(artists[idx]) + """.v2"""
                snake_case__ = [
                    self._normalize(UpperCamelCase__) + """.v2""" for genre in genres[idx].split("""_""")
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            # v2 lyric vocabulary: explicit character list with fixed offsets.
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""")
            snake_case__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
            snake_case__ = {vocab[index]: index + 1 for index in range(len(UpperCamelCase__))}
            snake_case__ = 0
            snake_case__ = len(UpperCamelCase__) + 1
            snake_case__ = self.vocab
            snake_case__ = {v: k for k, v in self.vocab.items()}
            snake_case__ = """"""
        else:
            # v3: strip accents, then drop any out-of-vocabulary characters.
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""")
            snake_case__ = self._run_strip_accents(UpperCamelCase__)
            snake_case__ = lyrics.replace("""\\""" , """\n""")
            snake_case__ = self.out_of_vocab.sub("""""" , UpperCamelCase__), [], []
        return artists, genres, lyrics

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : str):
        '''Strip accents: NFD-normalise and drop combining marks (category Mn).'''
        snake_case__ = unicodedata.normalize("""NFD""" , UpperCamelCase__)
        snake_case__ = []
        for char in text:
            snake_case__ = unicodedata.category(UpperCamelCase__)
            if cat == "Mn":
                continue
            output.append(UpperCamelCase__)
        return "".join(UpperCamelCase__)

    def __magic_name__ ( self : List[str] , UpperCamelCase__ : str):
        '''Normalise an artist/genre name: lowercase, keep [a-z0-9.], collapse
        everything else into single underscores and trim them.'''
        snake_case__ = (
            [chr(UpperCamelCase__) for i in range(ord("""a""") , ord("""z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""A""") , ord("""Z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""0""") , ord("""9""") + 1)]
            + ["""."""]
        )
        snake_case__ = frozenset(UpperCamelCase__)
        snake_case__ = re.compile(R"""_+""")
        snake_case__ = """""".join([c if c in accepted else """_""" for c in text.lower()])
        snake_case__ = pattern.sub("""_""" , UpperCamelCase__).strip("""_""")
        return text

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : List[str]):
        '''Join lyric tokens back into a string.

        NOTE(review): joining with spaces is surprising for a character-level
        vocabulary — confirm against the upstream implementation.'''
        return " ".join(UpperCamelCase__)

    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : bool = False):
        '''Convert ``inputs`` to tensors of the requested framework
        (TF / PyTorch / JAX / NumPy), optionally prepending a batch axis.'''
        if not isinstance(UpperCamelCase__ , UpperCamelCase__):
            snake_case__ = TensorType(UpperCamelCase__)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""")
            import tensorflow as tf

            snake_case__ = tf.constant
            snake_case__ = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""")
            import torch

            snake_case__ = torch.tensor
            snake_case__ = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""")
            import jax.numpy as jnp  # noqa: F811

            snake_case__ = jnp.array
            snake_case__ = _is_jax
        else:
            snake_case__ = np.asarray
            snake_case__ = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                snake_case__ = [inputs]
            if not is_tensor(UpperCamelCase__):
                snake_case__ = as_tensor(UpperCamelCase__)
        except:  # noqa E722
            raise ValueError(
                """Unable to create tensor, you should probably activate truncation and/or padding """
                """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""")
        return inputs

    def __call__( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any="" , UpperCamelCase__ : Dict="pt"):
        '''Tokenize one (artist, genres, lyrics) triple and return a
        ``BatchEncoding`` with one input_ids tensor per prior level.
        Attention masks are filled with -INFINITY placeholders.'''
        snake_case__ = [0, 0, 0]
        snake_case__ = [artist] * len(self.version)
        snake_case__ = [genres] * len(self.version)
        snake_case__ , snake_case__ , snake_case__ = self.tokenize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ , snake_case__ , snake_case__ = self._convert_token_to_id(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = [-INFINITY] * len(full_tokens[-1])
        snake_case__ = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCamelCase__)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks})

    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None):
        '''Write the three vocabularies to ``save_directory`` as JSON and
        return the three file paths.'''
        if not os.path.isdir(UpperCamelCase__):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCamelCase__))
        return (artists_file, genres_file, lyrics_file)

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]):
        '''Inverse of ``_convert_token_to_id``: map ids back to artist name,
        genre names and lyric characters via the decoder tables.'''
        snake_case__ = self.artists_decoder.get(UpperCamelCase__)
        snake_case__ = [self.genres_decoder.get(UpperCamelCase__) for genre in genres_index]
        snake_case__ = [self.lyrics_decoder.get(UpperCamelCase__) for character in lyric_index]
        return artist, genres, lyrics
| 654 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowerCAmelCase ( lowercase_ , unittest.TestCase ):
    """Fast unit tests for ``ShapEImgaImgPipeline`` built from tiny dummy components.

    NOTE(review): an identifier mangler collapsed locals to ``snake_case__``,
    method names to ``__magic_name__`` and class attributes to ``_lowercase``;
    names read later (``prior``, ``image_encoder``, ``pipe``, ``inputs`` …)
    were originally bound by those mangled assignments.
    """

    # Pipeline class under test.
    _lowercase : Dict = ShapEImgaImgPipeline
    # Required call-time parameters.
    _lowercase : List[Any] = ['''image''']
    # Parameters that may be batched.
    _lowercase : Optional[int] = ['''image''']
    # Optional call-time parameters exercised by the shared pipeline tests.
    _lowercase : Optional[int] = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
    # Presumably disables a shared-mixin check (e.g. CPU offload) — confirm.
    _lowercase : Optional[int] = False

    @property
    def __magic_name__ ( self : Optional[Any]):
        """Hidden size of the dummy embedder (32)."""
        return 3_2

    @property
    def __magic_name__ ( self : List[str]):
        """Time-embedding input dimension (32)."""
        return 3_2

    @property
    def __magic_name__ ( self : int):
        """Time-embedding projection dimension (4x the input dim)."""
        return self.time_input_dim * 4

    @property
    def __magic_name__ ( self : Optional[int]):
        """Hidden width of the dummy renderer (8)."""
        return 8

    @property
    def __magic_name__ ( self : Optional[Any]):
        """Tiny seeded CLIP vision model used as the dummy image encoder."""
        torch.manual_seed(0)
        snake_case__ = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        snake_case__ = CLIPVisionModel(UpperCamelCase__)
        return model

    @property
    def __magic_name__ ( self : List[Any]):
        """CLIP image processor matching the dummy encoder's 224px input."""
        snake_case__ = CLIPImageProcessor(
            crop_size=2_2_4 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=2_2_4 , )
        return image_processor

    @property
    def __magic_name__ ( self : Any):
        """Tiny seeded prior transformer."""
        torch.manual_seed(0)
        snake_case__ = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 1_6,
            """embedding_dim""": self.time_input_dim,
            """num_embeddings""": 3_2,
            """embedding_proj_dim""": self.text_embedder_hidden_size,
            """time_embed_dim""": self.time_embed_dim,
            """num_layers""": 1,
            """clip_embed_dim""": self.time_input_dim * 2,
            """additional_embeddings""": 0,
            """time_embed_act_fn""": """gelu""",
            """norm_in_type""": """layer""",
            """embedding_proj_norm_type""": """layer""",
            """encoder_hid_proj_type""": None,
            """added_emb_type""": None,
        }
        snake_case__ = PriorTransformer(**UpperCamelCase__)
        return model

    @property
    def __magic_name__ ( self : int):
        """Tiny seeded NeRF-style renderer."""
        torch.manual_seed(0)
        snake_case__ = {
            """param_shapes""": (
                (self.renderer_dim, 9_3),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            """d_latent""": self.time_input_dim,
            """d_hidden""": self.renderer_dim,
            """n_output""": 1_2,
            """background""": (
                0.1,
                0.1,
                0.1,
            ),
        }
        snake_case__ = ShapERenderer(**UpperCamelCase__)
        return model

    def __magic_name__ ( self : Union[str, Any]):
        """Assemble the full set of dummy pipeline components."""
        snake_case__ = self.dummy_prior
        snake_case__ = self.dummy_image_encoder
        snake_case__ = self.dummy_image_processor
        snake_case__ = self.dummy_renderer
        snake_case__ = HeunDiscreteScheduler(
            beta_schedule="""exp""" , num_train_timesteps=1_0_2_4 , prediction_type="""sample""" , use_karras_sigmas=UpperCamelCase__ , clip_sample=UpperCamelCase__ , clip_sample_range=1.0 , )
        snake_case__ = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """image_processor""": image_processor,
            """renderer""": renderer,
            """scheduler""": scheduler,
        }
        return components

    def __magic_name__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any]=0):
        """Build a deterministic call-kwargs dict (random 64x64 image + seeded generator)."""
        snake_case__ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        # MPS does not support device-specific generators.
        if str(UpperCamelCase__).startswith("""mps"""):
            snake_case__ = torch.manual_seed(UpperCamelCase__)
        else:
            snake_case__ = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__)
        snake_case__ = {
            """image""": input_image,
            """generator""": generator,
            """num_inference_steps""": 1,
            """frame_size""": 3_2,
            """output_type""": """np""",
        }
        return inputs

    def __magic_name__ ( self : Tuple):
        """Smoke test: one CPU inference step matches a stored output slice."""
        snake_case__ = """cpu"""
        snake_case__ = self.get_dummy_components()
        snake_case__ = self.pipeline_class(**UpperCamelCase__)
        snake_case__ = pipe.to(UpperCamelCase__)
        pipe.set_progress_bar_config(disable=UpperCamelCase__)
        snake_case__ = pipe(**self.get_dummy_inputs(UpperCamelCase__))
        snake_case__ = output.images[0]
        snake_case__ = image[0, -3:, -3:, -1]
        # 20 rendered frames of 32x32 RGB.
        assert image.shape == (2_0, 3_2, 3_2, 3)
        snake_case__ = np.array(
            [
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
            ])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def __magic_name__ ( self : int):
        """Outputs must be consistent across batch sizes."""
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def __magic_name__ ( self : Tuple):
        """Batched inference must match single-sample inference."""
        snake_case__ = torch_device == """cpu"""
        snake_case__ = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , )

    def __magic_name__ ( self : int):
        """num_images_per_prompt multiplies the batch dimension as expected."""
        snake_case__ = self.get_dummy_components()
        snake_case__ = self.pipeline_class(**UpperCamelCase__)
        snake_case__ = pipe.to(UpperCamelCase__)
        pipe.set_progress_bar_config(disable=UpperCamelCase__)
        snake_case__ = 1
        snake_case__ = 2
        snake_case__ = self.get_dummy_inputs(UpperCamelCase__)
        for key in inputs.keys():
            if key in self.batch_params:
                snake_case__ = batch_size * [inputs[key]]
        snake_case__ = pipe(**UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration test for the pretrained Shap-E img2img pipeline."""

    def __magic_name__ ( self : List[Any]):
        """Free GPU memory between tests.

        NOTE(review): calls ``super().tearDown()``, so this was presumably
        named ``tearDown`` before identifier mangling — confirm.
        """
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __magic_name__ ( self : Optional[Any]):
        """Run the released checkpoint on the corgi image and compare to stored frames."""
        snake_case__ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""")
        snake_case__ = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_img2img_out.npy""")
        snake_case__ = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""")
        snake_case__ = pipe.to(UpperCamelCase__)
        pipe.set_progress_bar_config(disable=UpperCamelCase__)
        snake_case__ = torch.Generator(device=UpperCamelCase__).manual_seed(0)
        snake_case__ = pipe(
            UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type="""np""" , ).images[0]
        # 20 frames of 64x64 RGB.
        assert images.shape == (2_0, 6_4, 6_4, 3)
        assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__)
| 654 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class _lowerCAmelCase :
    """Resize images so their shortest edge falls in a sampled range, capped by ``max_size``.

    NOTE(review): locals were mangled to ``snake_case__``; names read later
    (``newh``, ``neww``, ``pil_image`` …) were originally bound by those lines.
    """

    def __init__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str]=sys.maxsize):
        """Store the interpolation mode, the (min, max) short-edge range and the size cap."""
        snake_case__ = """bilinear"""
        snake_case__ = max_size
        snake_case__ = short_edge_length

    def __call__( self : List[str] , UpperCamelCase__ : Tuple):
        """Resize each image in the input list; returns the list of resized images."""
        snake_case__ = []
        for img in imgs:
            snake_case__ , snake_case__ = img.shape[:2]
            # later: provide list and randomly choose index for resize
            snake_case__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
            # NOTE(review): a sampled size of 0 returns the single image
            # mid-loop instead of appending — confirm this short-circuit is
            # intended rather than a `continue`.
            if size == 0:
                return img
            # Scale factor that brings the short edge to `size`.
            snake_case__ = size * 1.0 / min(UpperCamelCase__ , UpperCamelCase__)
            if h < w:
                snake_case__ , snake_case__ = size, scale * w
            else:
                snake_case__ , snake_case__ = scale * h, size
            # Shrink further if the long edge exceeds the cap.
            if max(UpperCamelCase__ , UpperCamelCase__) > self.max_size:
                snake_case__ = self.max_size * 1.0 / max(UpperCamelCase__ , UpperCamelCase__)
                snake_case__ = newh * scale
                snake_case__ = neww * scale
            # Round to integer pixel sizes.
            snake_case__ = int(neww + 0.5)
            snake_case__ = int(newh + 0.5)
            if img.dtype == np.uinta:
                # uint8 path: resize via PIL.
                snake_case__ = Image.fromarray(UpperCamelCase__)
                snake_case__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
                snake_case__ = np.asarray(UpperCamelCase__)
            else:
                # Tensor path: HWC -> NCHW, interpolate, squeeze back.
                snake_case__ = img.permute(2 , 0 , 1).unsqueeze(0)  # 3, 0, 1) # hw(c) -> nchw
                snake_case__ = nn.functional.interpolate(
                    UpperCamelCase__ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase__).squeeze(0)
            img_augs.append(UpperCamelCase__)
        return img_augs
class _lowerCAmelCase :
    """Detectron-style image preprocessor: resize, normalize, pad to a common size.

    NOTE(review): locals were mangled to ``snake_case__``; names read later
    (``images``, ``sizes``, ``scales_yx`` …) were originally bound by them.
    """

    def __init__( self : Dict , UpperCamelCase__ : Optional[int]):
        """Cache resize transform, normalization constants and padding config from ``cfg``."""
        snake_case__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
        snake_case__ = cfg.INPUT.FORMAT
        snake_case__ = cfg.SIZE_DIVISIBILITY
        snake_case__ = cfg.PAD_VALUE
        snake_case__ = cfg.INPUT.MAX_SIZE_TEST
        snake_case__ = cfg.MODEL.DEVICE
        # Per-channel mean/std reshaped to (C, 1, 1) for broadcasting.
        snake_case__ = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        snake_case__ = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        snake_case__ = lambda UpperCamelCase__: (x - self.pixel_mean) / self.pixel_std

    def __magic_name__ ( self : Dict , UpperCamelCase__ : Dict):
        """Zero-pad each image to the element-wise max shape; returns (batch, sizes)."""
        snake_case__ = tuple(max(UpperCamelCase__) for s in zip(*[img.shape for img in images]))
        snake_case__ = [im.shape[-2:] for im in images]
        snake_case__ = [
            nn.functional.pad(
                UpperCamelCase__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(UpperCamelCase__ , UpperCamelCase__)
        ]
        return torch.stack(UpperCamelCase__), torch.tensor(UpperCamelCase__)

    def __call__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str=False):
        """Preprocess one or more images; returns (images, sizes, scales_yx)."""
        with torch.no_grad():
            if not isinstance(UpperCamelCase__ , UpperCamelCase__):
                snake_case__ = [images]
            if single_image:
                assert len(UpperCamelCase__) == 1
            # Coerce every entry to a float tensor on the target device.
            for i in range(len(UpperCamelCase__)):
                if isinstance(images[i] , torch.Tensor):
                    images.insert(UpperCamelCase__ , images.pop(UpperCamelCase__).to(self.device).float())
                elif not isinstance(images[i] , torch.Tensor):
                    images.insert(
                        UpperCamelCase__ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase__) , input_format=self.input_format))
                        .to(self.device)
                        .float() , )
            # resize smallest edge
            snake_case__ = torch.tensor([im.shape[:2] for im in images])
            snake_case__ = self.aug(UpperCamelCase__)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            snake_case__ = [self.normalizer(UpperCamelCase__) for x in images]
            # now pad them to do the following operations
            snake_case__ , snake_case__ = self.pad(UpperCamelCase__)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            snake_case__ = torch.true_divide(UpperCamelCase__ , UpperCamelCase__)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _UpperCAmelCase ( a : Optional[Any] , a : Any ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _UpperCAmelCase ( a : Any , a : Tuple[int, int] ):
assert torch.isfinite(a ).all(), "Box tensor contains infinite or NaN!"
snake_case__ , snake_case__ = box_size
tensor[:, 0].clamp_(min=0 , max=a )
tensor[:, 1].clamp_(min=0 , max=a )
tensor[:, 2].clamp_(min=0 , max=a )
tensor[:, 3].clamp_(min=0 , max=a )
| 654 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( lowercase_ , unittest.TestCase ):
    """Unit tests for ``BioGptTokenizer`` using a tiny hand-built BPE vocabulary.

    NOTE(review): locals were mangled to ``snake_case__`` and method names to
    ``__magic_name__``; names read later (``tokenizer``, ``tokens`` …) were
    originally bound by those lines.
    """

    # Tokenizer class under test.
    _lowercase : str = BioGptTokenizer
    # Presumably disables the rust-tokenizer variant of the shared tests.
    _lowercase : List[Any] = False

    def __magic_name__ ( self : Optional[Any]):
        """Write a minimal vocab/merges pair into the temp test directory."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case__ = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """w</w>""",
            """r</w>""",
            """t</w>""",
            """lo""",
            """low""",
            """er</w>""",
            """low</w>""",
            """lowest</w>""",
            """newer</w>""",
            """wider</w>""",
            """<unk>""",
        ]
        snake_case__ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__))))
        snake_case__ = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
        snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file , """w""") as fp:
            fp.write(json.dumps(UpperCamelCase__))
        with open(self.merges_file , """w""") as fp:
            fp.write("""\n""".join(UpperCamelCase__))

    def __magic_name__ ( self : Optional[Any] , UpperCamelCase__ : str):
        """Return an (input, expected-output) text pair for the shared tests."""
        snake_case__ = """lower newer"""
        snake_case__ = """lower newer"""
        return input_text, output_text

    def __magic_name__ ( self : Any):
        """Tokenization splits per the BPE merges and unknown tokens map to <unk>."""
        snake_case__ = BioGptTokenizer(self.vocab_file , self.merges_file)
        snake_case__ = """lower"""
        snake_case__ = ["""low""", """er</w>"""]
        snake_case__ = tokenizer.tokenize(UpperCamelCase__)
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = tokens + ["""<unk>"""]
        snake_case__ = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__) , UpperCamelCase__)

    @slow
    def __magic_name__ ( self : List[Any]):
        """Pretrained checkpoint prepends the BOS id (2) when building inputs."""
        snake_case__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""")
        snake_case__ = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase__)
        snake_case__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase__)
        snake_case__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__)
        snake_case__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
| 654 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (mangled name: originally presumably `logger`).
a__ = logging.get_logger(__name__)
# Map of canonical checkpoint names to their hosted config files
# (mangled name: originally the pretrained-config archive map).
a__ = {
    """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCAmelCase ( lowercase_ ):
    """Configuration class for WavLM models.

    NOTE(review): the identifier mangler collapsed every ``__init__``
    parameter to ``UpperCamelCase__`` (duplicate names — the signature does
    not compile as written) and every local to ``snake_case__``. The body
    reads the original parameter names (``hidden_size``, ``num_buckets`` …),
    which documents the intended signature; confirm against the upstream
    ``WavLMConfig``.
    """

    # Model type key used by the auto classes.
    _lowercase : Dict = '''wavlm'''

    def __init__( self : Tuple , UpperCamelCase__ : str=3_2 , UpperCamelCase__ : Any=7_6_8 , UpperCamelCase__ : Any=1_2 , UpperCamelCase__ : Tuple=1_2 , UpperCamelCase__ : str=3_0_7_2 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Optional[int]=1E-5 , UpperCamelCase__ : Any="group" , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCamelCase__ : List[str]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__ : Dict=(1_0, 3, 3, 3, 3, 2, 2) , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[Any]=1_2_8 , UpperCamelCase__ : Optional[int]=1_6 , UpperCamelCase__ : Optional[Any]=3_2_0 , UpperCamelCase__ : Any=8_0_0 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=0.05 , UpperCamelCase__ : Optional[Any]=1_0 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Tuple=1_0 , UpperCamelCase__ : Optional[int]=3_2_0 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=1_0_0 , UpperCamelCase__ : Dict=2_5_6 , UpperCamelCase__ : Optional[int]=2_5_6 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Tuple="mean" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Union[str, Any]=2_5_6 , UpperCamelCase__ : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCamelCase__ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCamelCase__ : Any=(1, 2, 3, 1, 1) , UpperCamelCase__ : Dict=5_1_2 , UpperCamelCase__ : str=8_0 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : str=False , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : List[str] , ):
        """Store all architecture, SpecAugment, quantization, CTC and adapter settings."""
        super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__)
        # Transformer encoder dimensions.
        snake_case__ = hidden_size
        snake_case__ = feat_extract_norm
        snake_case__ = feat_extract_activation
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = conv_bias
        # Relative-position bucket settings (WavLM-specific attention bias).
        snake_case__ = num_buckets
        snake_case__ = max_bucket_distance
        snake_case__ = num_conv_pos_embeddings
        snake_case__ = num_conv_pos_embedding_groups
        snake_case__ = len(self.conv_dim)
        snake_case__ = num_hidden_layers
        snake_case__ = intermediate_size
        snake_case__ = hidden_act
        snake_case__ = num_attention_heads
        # Dropout probabilities.
        snake_case__ = hidden_dropout
        snake_case__ = attention_dropout
        snake_case__ = activation_dropout
        snake_case__ = feat_proj_dropout
        snake_case__ = final_dropout
        snake_case__ = layerdrop
        snake_case__ = layer_norm_eps
        snake_case__ = initializer_range
        snake_case__ = num_ctc_classes
        snake_case__ = vocab_size
        snake_case__ = do_stable_layer_norm
        snake_case__ = use_weighted_layer_sum
        snake_case__ = classifier_proj_size
        # The three conv-layer tuples must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        snake_case__ = apply_spec_augment
        snake_case__ = mask_time_prob
        snake_case__ = mask_time_length
        snake_case__ = mask_time_min_masks
        snake_case__ = mask_feature_prob
        snake_case__ = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        snake_case__ = num_codevectors_per_group
        snake_case__ = num_codevector_groups
        snake_case__ = contrastive_logits_temperature
        snake_case__ = num_negatives
        snake_case__ = codevector_dim
        snake_case__ = proj_codevector_dim
        snake_case__ = diversity_loss_weight
        # ctc loss
        snake_case__ = ctc_loss_reduction
        snake_case__ = ctc_zero_infinity
        # adapter
        snake_case__ = add_adapter
        snake_case__ = adapter_kernel_size
        snake_case__ = adapter_stride
        snake_case__ = num_adapter_layers
        snake_case__ = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        snake_case__ = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = xvector_output_dim

    @property
    def __magic_name__ ( self : Optional[int]):
        """Total downsampling stride of the convolutional feature extractor."""
        return functools.reduce(operator.mul , self.conv_stride , 1)
| 654 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Gate the real pipeline imports on torch + transformers>=4.25.0 being
# installed; otherwise re-export dummy objects that raise a helpful error
# on first use.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 654 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( lowercase_ ):
    """Unconditional image-generation pipeline using score-based SDE-VE sampling.

    NOTE(review): locals were mangled to ``snake_case__``; names read later
    (``sample``, ``model``, ``output`` …) were originally bound by them.
    """

    # Denoising U-Net.
    _lowercase : UNetaDModel
    # SDE-VE scheduler (predictor-corrector sampling).
    _lowercase : ScoreSdeVeScheduler

    def __init__( self : Union[str, Any] , UpperCamelCase__ : UNetaDModel , UpperCamelCase__ : ScoreSdeVeScheduler):
        """Register the U-Net and the scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__)

    @torch.no_grad()
    def __call__( self : Union[str, Any] , UpperCamelCase__ : int = 1 , UpperCamelCase__ : int = 2_0_0_0 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , **UpperCamelCase__ : List[str] , ):
        """Sample images via predictor-corrector SDE-VE integration.

        Parameters (mangled): batch_size, num_inference_steps, generator,
        output_type, return_dict — inferred from the body; confirm upstream.
        """
        snake_case__ = self.unet.config.sample_size
        snake_case__ = (batch_size, 3, img_size, img_size)
        snake_case__ = self.unet
        # Initial noise scaled by the scheduler's starting sigma.
        snake_case__ = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__) * self.scheduler.init_noise_sigma
        snake_case__ = sample.to(self.device)
        self.scheduler.set_timesteps(UpperCamelCase__)
        self.scheduler.set_sigmas(UpperCamelCase__)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            snake_case__ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                snake_case__ = self.unet(UpperCamelCase__ , UpperCamelCase__).sample
                snake_case__ = self.scheduler.step_correct(UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__).prev_sample
            # prediction step
            snake_case__ = model(UpperCamelCase__ , UpperCamelCase__).sample
            snake_case__ = self.scheduler.step_pred(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__)
            snake_case__ , snake_case__ = output.prev_sample, output.prev_sample_mean
        # Use the noise-free mean of the final step as the output image.
        snake_case__ = sample_mean.clamp(0 , 1)
        snake_case__ = sample.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            snake_case__ = self.numpy_to_pil(UpperCamelCase__)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=UpperCamelCase__)
| 654 | 1 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a__ = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
a__ = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
a__ = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
    """`datasets.Metric` wrapper around the official ``mauve-text`` implementation."""

    def __magic_name__ ( self : List[str]):
        """Declare metric metadata: citation, string/string features and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence"""),
                    """references""": datasets.Value("""string""" , id="""sequence"""),
                }) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
                """https://arxiv.org/abs/2102.01454""",
                """https://github.com/krishnap25/mauve""",
            ] , )

    def __magic_name__ ( self : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Dict=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Any="auto" , UpperCamelCase__ : Optional[Any]=-1 , UpperCamelCase__ : List[Any]=0.9 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : str=5_0_0 , UpperCamelCase__ : Tuple="gpt2-large" , UpperCamelCase__ : Any=-1 , UpperCamelCase__ : Dict=1_0_2_4 , UpperCamelCase__ : int=2_5 , UpperCamelCase__ : List[str]=5 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : int=2_5 , ):
        """Forward all arguments to ``mauve.compute_mauve`` and return its output object.

        NOTE(review): every parameter name was mangled to ``UpperCamelCase__``
        (the signature does not compile as written); the keyword names in the
        ``compute_mauve`` call document the intended parameters.
        """
        snake_case__ = compute_mauve(
            p_text=UpperCamelCase__ , q_text=UpperCamelCase__ , p_features=UpperCamelCase__ , q_features=UpperCamelCase__ , p_tokens=UpperCamelCase__ , q_tokens=UpperCamelCase__ , num_buckets=UpperCamelCase__ , pca_max_data=UpperCamelCase__ , kmeans_explained_var=UpperCamelCase__ , kmeans_num_redo=UpperCamelCase__ , kmeans_max_iter=UpperCamelCase__ , featurize_model_name=UpperCamelCase__ , device_id=UpperCamelCase__ , max_text_length=UpperCamelCase__ , divergence_curve_discretization_size=UpperCamelCase__ , mauve_scaling_factor=UpperCamelCase__ , verbose=UpperCamelCase__ , seed=UpperCamelCase__ , )
        return out
| 654 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """Fast tests for ``IFInpaintingSuperResolutionPipeline`` (DeepFloyd IF).

    NOTE(review): locals were mangled to ``snake_case__`` and class attributes
    to ``_lowercase``; names read later (``generator``, ``image`` …) were
    originally bound by those lines.
    """

    # Pipeline class under test.
    _lowercase : Optional[int] = IFInpaintingSuperResolutionPipeline
    # Call params (width/height are fixed by the superresolution stage).
    _lowercase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    # Batchable params, plus the low-res conditioning image.
    _lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
    # `latents` is not an accepted optional parameter here.
    _lowercase : int = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def __magic_name__ ( self : Union[str, Any]):
        """Reuse the shared dummy superresolution components from IFPipelineTesterMixin."""
        return self._get_superresolution_dummy_components()

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int]=0):
        """Build deterministic call kwargs: 16px image, 32px original + mask."""
        # MPS does not support device-specific generators.
        if str(UpperCamelCase__).startswith("""mps"""):
            snake_case__ = torch.manual_seed(UpperCamelCase__)
        else:
            snake_case__ = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__)
        snake_case__ = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        snake_case__ = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def __magic_name__ ( self : Dict):
        """xFormers attention must match the default attention closely."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def __magic_name__ ( self : int):
        """Pipeline round-trips through save/load with optional components."""
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
    def __magic_name__ ( self : Optional[Any]):
        """fp16 save/load stays within a loose tolerance."""
        super().test_save_load_floataa(expected_max_diff=1E-1)

    def __magic_name__ ( self : List[Any]):
        """Attention slicing must not change outputs beyond tolerance."""
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def __magic_name__ ( self : Union[str, Any]):
        """Pipeline round-trips through a local save/load."""
        self._test_save_load_local()

    def __magic_name__ ( self : str):
        """Batched inference must match single-sample inference."""
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 654 | 1 |
# Backwards-compatibility shim: importing this module re-exports
# StableDiffusionInpaintPipeline from diffusers and warns, at import time,
# that the standalone `inpainting.py` script is deprecated.
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
    """The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
    """ StableDiffusionInpaintPipeline` instead."""
)
| 654 |
# Digit sets used to force every column of n + reverse(n) to be odd:
# once one digit of a pair is fixed, its partner must have opposite parity.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """Count reversible numbers with `length` digits (Project Euler 145 helper).

    A number n (no leading/trailing zero) is reversible when every digit of
    n + reverse(n) is odd. Digits are filled pairwise from the outside in;
    `remainder` carries the running column-sum carry and `remaining_length`
    is how many digits are still unplaced. `digits` is mutated in place as a
    scratch buffer.

    Fixes applied to the obfuscated original: restored the function name used
    by its own recursion and by `solution`, the constant names `EVEN_DIGITS`/
    `ODD_DIGITS` read below (both were bound to `a__`), the four distinct
    parameter names (all were declared as `a`, a SyntaxError), and the
    `digits[...]` index assignments that had been collapsed.
    """
    if remaining_length == 0:
        # All pairs placed: reject leading/trailing zero, then verify every
        # column sum (plus carry) is odd, working from the middle outwards.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is added to itself (2 * digit, always
        # even), so the incoming carry must already be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The partner digit needs the opposite parity so the column is odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result
def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers exist below 10**max_power (PE 145).

    Fixes applied to the obfuscated original: the function was defined under a
    placeholder name while the main guard called `solution()`, and the
    recursive-helper call collapsed two distinct arguments (`length`) into one
    placeholder.
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 654 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    """Creates a tiny random Pegasus config with matching dummy inputs and the
    shared checks used by the TF Pegasus model tests.

    NOTE(review): the obfuscated original declared every __init__ parameter
    under one duplicated placeholder name (a SyntaxError) and dropped all
    `self.` targets; names below are recovered from the surviving
    right-hand-side references and the callers at the test-class level.
    """

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict); an EOS token is appended to the encoder input."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Decoding with cached past_key_values must match a full forward pass."""
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        # NOTE(review): the obfuscated source had `tf.inta`, a non-existent
        # attribute; restored to tf.int8.
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values
        )[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past, output_from_past, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default masks for a Pegasus TF forward call.

    Any mask left as None is derived from the ids (non-pad positions) or set
    to all-ones with the layer/head shape from `config`.

    Fixes applied to the obfuscated original: restored the function name used
    by the model tester, the eight distinct parameter names (several were
    declared as a duplicated `a`), and `tf.int8` (was the non-existent
    `tf.inta`).
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # The decoder-start position is always attended to.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model-tester suite wired up for Pegasus.

    NOTE(review): the obfuscated original bound every class attribute to one
    `_lowercase` name (so only the last survived), named both test methods
    identically, and inherited from an undefined `lowercase_`; the bases are
    restored from the file's mixin imports and the attribute names from the
    mixin conventions — confirm against the mixins' expected attributes.
    """

    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    """Slow integration test: batch summarization with google/pegasus-xsum.

    NOTE(review): the obfuscated original named all three data attributes
    `_lowercase` and several methods `__magic_name__`, shadowing one another;
    the names below are recovered from the `self.*` references in the method
    bodies.
    """

    src_text = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    expected_text = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        """Tokenize src_text, generate with beam search, and decode back to strings."""
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 654 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
# Human-readable language name -> FLORES-200 code used by the NLLB-200 model.
# Fix: the obfuscated original bound this dict to `a__` while the translation
# tool below reads `LANGUAGE_CODES`.
LANGUAGE_CODES = {
    "Acehnese Arabic": "ace_Arab",
    "Acehnese Latin": "ace_Latn",
    "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab",
    "Tunisian Arabic": "aeb_Arab",
    "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab",
    "Akan": "aka_Latn",
    "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab",
    "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn",
    "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab",
    "Egyptian Arabic": "arz_Arab",
    "Assamese": "asm_Beng",
    "Asturian": "ast_Latn",
    "Awadhi": "awa_Deva",
    "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab",
    "North Azerbaijani": "azj_Latn",
    "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn",
    "Balinese": "ban_Latn",
    "Belarusian": "bel_Cyrl",
    "Bemba": "bem_Latn",
    "Bengali": "ben_Beng",
    "Bhojpuri": "bho_Deva",
    "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn",
    "Standard Tibetan": "bod_Tibt",
    "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn",
    "Bulgarian": "bul_Cyrl",
    "Catalan": "cat_Latn",
    "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn",
    "Chokwe": "cjk_Latn",
    "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn",
    "Welsh": "cym_Latn",
    "Danish": "dan_Latn",
    "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn",
    "Dyula": "dyu_Latn",
    "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek",
    "English": "eng_Latn",
    "Esperanto": "epo_Latn",
    "Estonian": "est_Latn",
    "Basque": "eus_Latn",
    "Ewe": "ewe_Latn",
    "Faroese": "fao_Latn",
    "Fijian": "fij_Latn",
    "Finnish": "fin_Latn",
    "Fon": "fon_Latn",
    "French": "fra_Latn",
    "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn",
    "Scottish Gaelic": "gla_Latn",
    "Irish": "gle_Latn",
    "Galician": "glg_Latn",
    "Guarani": "grn_Latn",
    "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn",
    "Hausa": "hau_Latn",
    "Hebrew": "heb_Hebr",
    "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva",
    "Croatian": "hrv_Latn",
    "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn",
    "Igbo": "ibo_Latn",
    "Ilocano": "ilo_Latn",
    "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn",
    "Italian": "ita_Latn",
    "Javanese": "jav_Latn",
    "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn",
    "Jingpho": "kac_Latn",
    "Kamba": "kam_Latn",
    "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab",
    "Kashmiri Devanagari": "kas_Deva",
    "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab",
    "Central Kanuri Latin": "knc_Latn",
    "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn",
    "Kabuverdianu": "kea_Latn",
    "Khmer": "khm_Khmr",
    "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn",
    "Kyrgyz": "kir_Cyrl",
    "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn",
    "Kikongo": "kon_Latn",
    "Korean": "kor_Hang",
    "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn",
    "Limburgish": "lim_Latn",
    "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn",
    "Lombard": "lmo_Latn",
    "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn",
    "Luba-Kasai": "lua_Latn",
    "Ganda": "lug_Latn",
    "Luo": "luo_Latn",
    "Mizo": "lus_Latn",
    "Standard Latvian": "lvs_Latn",
    "Magahi": "mag_Deva",
    "Maithili": "mai_Deva",
    "Malayalam": "mal_Mlym",
    "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab",
    "Minangkabau Latin": "min_Latn",
    "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn",
    "Maltese": "mlt_Latn",
    "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl",
    "Mossi": "mos_Latn",
    "Maori": "mri_Latn",
    "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn",
    "Norwegian Nynorsk": "nno_Latn",
    "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva",
    "Northern Sotho": "nso_Latn",
    "Nuer": "nus_Latn",
    "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn",
    "West Central Oromo": "gaz_Latn",
    "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn",
    "Eastern Panjabi": "pan_Guru",
    "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab",
    "Polish": "pol_Latn",
    "Portuguese": "por_Latn",
    "Dari": "prs_Arab",
    "Southern Pashto": "pbt_Arab",
    "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn",
    "Rundi": "run_Latn",
    "Russian": "rus_Cyrl",
    "Sango": "sag_Latn",
    "Sanskrit": "san_Deva",
    "Santali": "sat_Olck",
    "Sicilian": "scn_Latn",
    "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh",
    "Slovak": "slk_Latn",
    "Slovenian": "slv_Latn",
    "Samoan": "smo_Latn",
    "Shona": "sna_Latn",
    "Sindhi": "snd_Arab",
    "Somali": "som_Latn",
    "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn",
    "Tosk Albanian": "als_Latn",
    "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl",
    "Swati": "ssw_Latn",
    "Sundanese": "sun_Latn",
    "Swedish": "swe_Latn",
    "Swahili": "swh_Latn",
    "Silesian": "szl_Latn",
    "Tamil": "tam_Taml",
    "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu",
    "Tajik": "tgk_Cyrl",
    "Tagalog": "tgl_Latn",
    "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi",
    "Tamasheq Latin": "taq_Latn",
    "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn",
    "Tswana": "tsn_Latn",
    "Tsonga": "tso_Latn",
    "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn",
    "Turkish": "tur_Latn",
    "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng",
    "Uyghur": "uig_Arab",
    "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn",
    "Urdu": "urd_Arab",
    "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn",
    "Vietnamese": "vie_Latn",
    "Waray": "war_Latn",
    "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn",
    "Eastern Yiddish": "ydd_Hebr",
    "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant",
    "Chinese Simplified": "zho_Hans",
    "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn",
    "Zulu": "zul_Latn",
}
class _lowerCAmelCase ( PipelineTool ):
    """Tool that translates text between two NLLB-200 supported languages.

    NOTE(review): the obfuscated original assigned all seven class attributes
    to one `_lowercase` name and all three methods to `__magic_name__`, so
    only the last of each survived, and it inherited from an undefined
    `lowercase_`. The PipelineTool base (imported above) and the hook names /
    attributes its method bodies read (`lang_to_code`, `pre_processor`,
    `model`, `post_processor`) are restored here.
    """

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        """Validate both plain-English language names and tokenize `text` for translation."""
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        """Run generation on the tokenized inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the first generated sequence back to plain text."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 654 | 1 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
# Slack client used by Message and the module main block below.
# Fix: the obfuscated original bound this to `a__` while all call sites read `client`.
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    """Parse a pytest summary line into (failed, success, time_spent).

    `test_results` is the whitespace-separated pytest tail, e.g.
    "2 failed, 3 passed in 10.5s" or "== 4 passed in 2.0s ==".

    Fixes applied to the obfuscated original: restored the function name used
    by the module main block and the local names that had been collapsed to a
    single placeholder (making the counters and return value undefined).
    """
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    """Map each failing doctest file to the first line of its error message.

    Scans pytest's `failures_short` report: a header line containing
    "_ [doctest]" carries the file name (third whitespace token); the next
    line whose first token is not a line number is taken as the error.

    Fixes applied to the obfuscated original: restored the function name used
    by the module main block and the collapsed local names (`failures`,
    `file`, `in_error`).
    """
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    """Builds and posts the Slack report for a doc-test CI run.

    NOTE(review): the obfuscated original bound every local and attribute to
    one placeholder name, so __init__ stored nothing on self and
    `self.thread_ts` was never set; names are recovered from the surviving
    `self.*` reads and the call sites in the module main block (which
    constructs `Message(...)` and calls `post()` / `post_reply()`).
    """

    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        """Total run time rendered as '<h>h<m>m<s>s'."""
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3_6_0_0 + minutes * 6_0 + seconds

        hours, minutes, seconds = total_secs // 3_6_0_0, (total_secs % 3_6_0_0) // 6_0, total_secs % 6_0
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 4_0
        # NOTE(review): reads the module-level `doc_test_results`, not
        # self.doc_test_results, as in the original script.
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        """Post a generic 'tests could not run' message to Slack."""
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        # NOTE(review): `payload` is a list, so json.loads here would raise;
        # preserved from the original — presumably it should be json.dumps(payload).
        print(json.dumps({"blocks": json.loads(payload)}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        """Post the main summary message and remember its thread timestamp."""
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        """Build the Slack blocks for one job's failure details."""
        failures_text = ""
        for key, value in failures.items():
            value = value[:2_0_0] + " [Truncated]" if len(value) > 2_5_0 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        """Post one threaded reply per failing category, under the main post."""
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )
                time.sleep(1)
def get_job_links():
    """Return {job name: html_url} for all jobs of the current GitHub Action run.

    Pages through the GitHub API 100 jobs at a time; returns {} on any error.

    Fixes applied to the obfuscated original: restored the function name used
    by the module main block and the collapsed local names (`run_id`, `url`,
    `result`, `jobs`, `pages_to_iterate_over`).
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    """Read every file in directory `name` into {stem: text contents}.

    Returns {} when the directory does not exist. Raises ValueError (chained
    from UnicodeDecodeError) when a file is not valid UTF-8 text.

    Fixes applied to the obfuscated original: restored the function name used
    by the module main block, the `files` local, and the dict store
    `_artifact[file.split(".")[0]] = ...` that had been collapsed away.
    """
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    """Return {directory name: Artifact} for every directory in the CWD.

    Each Artifact records its name and the paths registered for it.

    Fixes applied to the obfuscated original: restored the function name used
    by the module main block and the nested class's attribute bindings
    (`self.name`, `self.paths`), which had been collapsed to placeholders.
    """

    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    # Collect job links and locally-downloaded artifact directories, parse the
    # doc-test reports, and post a Slack summary with threaded failure details.
    # Fixes applied to the obfuscated original: every binding was collapsed to
    # `a__`; targets are recovered from the names read below and from the keys
    # Message pops (`job_link`, `failures`, `success`, `time_spent`).
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        # Strip the surrounding brackets/characters and keep a trailing comma
        # for Message's time parsing.
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 654 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return one representative MLP linear layer from *model*.

    Used by the quantization tests (see the `get_some_linear_layer(self.model_abit)`
    call below) to check the dtype/class of a quantized weight.

    The mangled source named the parameter `a` while the body read an undefined
    `model`; the def name is restored from the call site. The Bloom attribute was
    mangled to `dense_ah_to_h` — the real transformers attribute is `dense_4h_to_h`.
    """
    if model.config.model_type == "gpt2":
        # GPT-2 stores its MLP up-projection as `c_fc`.
        return model.transformer.h[0].mlp.c_fc
    # Bloom-style MLP down-projection.
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module with a trainable low-rank adapter (LoRA-style).

    The wrapped module is kept frozen by the caller; only the two small
    adapter projections are trained. Reconstructed from mangled source:
    duplicate `UpperCamelCase__` parameters were a SyntaxError, the lost
    bindings are restored as `self.module`/`self.adapter`/`std_dev`, the class
    name comes from the `LoRALayer(module.q_proj, rank=16)` call sites below,
    and the forward hook is renamed to `forward` as required by nn.Module.
    """

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        # Two-layer bottleneck: in_features -> rank -> out_features.
        # NOTE(review): `bias` was mangled in the source; `bias=False` is the
        # conventional LoRA choice — confirm against the upstream test.
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        # He-style init scale for the down-projection; up-projection starts at
        # zero so the adapter is a no-op before training.
        std_dev = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=std_dev)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, x, *args, **kwargs):
        # Frozen base output plus the (initially zero) adapter correction.
        return self.module(x, *args, **kwargs) + self.adapter(x)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """Shared fixtures for the 4-bit bitsandbytes tests (bloom-1b7).

    NOTE(review): this class was machine-mangled — every attribute was renamed
    to `_lowercase` (so later assignments clobber earlier ones) and
    `EXPECTED_OUTPUTS` is referenced but never bound. The originals were
    model_name, EXPECTED_RELATIVE_DIFFERENCE, input_text, EXPECTED_OUTPUTS and
    MAX_NEW_TOKENS — restore them before running.
    """
    # model checkpoint under test
    _lowercase : Dict = '''bigscience/bloom-1b7'''
    # Constant values
    # expected fp16/4-bit memory-footprint ratio
    _lowercase : Any = 2.109_6595_5269_2574
    # generation prompt
    _lowercase : Tuple = '''Hello my name is'''
    # set of acceptable generations (non-deterministic across kernels)
    _lowercase : List[Any] = set()
    EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
    _lowercase : List[str] = 10
    def __magic_name__ ( self : Optional[int]):
        '''Load the tokenizer shared by all subclasses (originally setUp).'''
        snake_case__ = AutoTokenizer.from_pretrained(self.model_name)
class _lowerCAmelCase ( lowercase_ ):
    """Core 4-bit quantization tests: config round-trip, memory footprint,
    dtypes, generation, and unsupported conversions.

    NOTE(review): machine-mangled — all test-method names were rewritten to
    `__magic_name__` (so only the last def survives at runtime), local bindings
    were rewritten to `snake_case__`, and several argument values were replaced
    by the undefined `UpperCamelCase__`. Comments below record what each method
    originally exercised.
    """
    def __magic_name__ ( self : str):
        '''setUp: load an fp16 reference model and a 4-bit quantized model.'''
        super().setUp()
        # Models and tokenizer
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="""auto""")
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
    def __magic_name__ ( self : Tuple):
        '''tearDown: drop both models and release cached CUDA memory.'''
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : str):
        '''Quantized model config must carry a serializable quantization_config.'''
        snake_case__ = self.model_abit.config
        self.assertTrue(hasattr(UpperCamelCase__ , """quantization_config"""))
        snake_case__ = config.to_dict()
        snake_case__ = config.to_diff_dict()
        snake_case__ = config.to_json_string()
    def __magic_name__ ( self : Dict):
        '''4-bit model footprint ratio vs fp16 matches the expected constant.'''
        from bitsandbytes.nn import Paramsabit
        snake_case__ = self.model_fpaa.get_memory_footprint()
        snake_case__ = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE)
        snake_case__ = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Paramsabit)
    def __magic_name__ ( self : Optional[int]):
        '''All quantized Linear weights (except kept-in-fp32 ones) are packed uint8.'''
        from transformers import TaPreTrainedModel
        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(UpperCamelCase__ , torch.nn.Linear):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta)
    def __magic_name__ ( self : Dict):
        '''Quantized model still generates one of the expected continuations.'''
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
    def __magic_name__ ( self : str):
        '''Loading via an explicit BitsAndBytesConfig also produces a working model.'''
        snake_case__ = BitsAndBytesConfig()
        snake_case__ = True
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = model_abit_from_config.generate(
            input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
    def __magic_name__ ( self : Optional[int]):
        '''Serializing a 4-bit model is unsupported and must raise.'''
        with self.assertRaises(UpperCamelCase__), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(UpperCamelCase__)
    def __magic_name__ ( self : List[str]):
        '''Passing both quantization_config and load_in_4bit kwargs must raise.'''
        snake_case__ = BitsAndBytesConfig()
        with self.assertRaises(UpperCamelCase__):
            snake_case__ = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
    def __magic_name__ ( self : List[Any]):
        '''Device/dtype casts are forbidden on the quantized model but still work on fp16.'''
        with self.assertRaises(UpperCamelCase__):
            # Tries with `str`
            self.model_abit.to("""cpu""")
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa)
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.to(torch.device("""cuda:0"""))
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = self.model_fpaa.to(torch.floataa)
        snake_case__ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.to("""cpu""")
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.half()
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.float()
    def __magic_name__ ( self : Dict):
        '''T5 keeps its lm_head (wo) weights in fp32 even when loaded in 4-bit.'''
        snake_case__ = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """4-bit tests specific to T5 / flan-T5 (modules kept in fp32).

    NOTE(review): machine-mangled — method names collapsed to `__magic_name__`
    and class attributes (model_name, dense_act_model_name, tokenizer,
    input_text) were rewritten to `snake_case__` so they are never bound.
    """
    @classmethod
    def __magic_name__ ( cls : Optional[Any]):
        '''setUpClass: record checkpoint names, tokenizer and prompt.'''
        snake_case__ = """t5-small"""
        snake_case__ = """google/flan-t5-small"""  # flan-t5 uses dense-act instead of dense-relu-dense
        snake_case__ = AutoTokenizer.from_pretrained(cls.model_name)
        snake_case__ = """Translate in German: Hello, my dog is cute"""
    def __magic_name__ ( self : Optional[int]):
        '''tearDown: release cached CUDA memory between tests.'''
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : Any):
        '''Loading works even when _keep_in_fp32_modules is disabled (set to None).'''
        from transformers import TaForConditionalGeneration
        # stash and clear the keep-in-fp32 list, restored at the end
        snake_case__ = TaForConditionalGeneration._keep_in_fpaa_modules
        snake_case__ = None
        # test with `t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        # test with `flan-t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        snake_case__ = modules
    def __magic_name__ ( self : Union[str, Any]):
        '''Standard 4-bit load: decoder attention projections become bnb Linear4bit.'''
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration
        # test with `t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit))
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        # test with `flan-t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
class _lowerCAmelCase ( lowercase_ ):
    """4-bit loading across model classes (base, sequence-classification,
    causal-LM, seq2seq) — heads must stay regular nn.Parameter.

    NOTE(review): machine-mangled — attribute bindings (model_name,
    seq_to_seq_name, base_model, sequence_model, model_abit, seq_to_seq_model)
    were rewritten to `snake_case__` and are never bound.
    """
    def __magic_name__ ( self : int):
        '''setUp: load one model per auto-class in 4-bit.'''
        super().setUp()
        # model_name
        snake_case__ = """bigscience/bloom-560m"""
        snake_case__ = """t5-small"""
        # Different types of model
        snake_case__ = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # Sequence classification model
        snake_case__ = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # CausalLM model
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # Seq2seq model
        snake_case__ = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
    def __magic_name__ ( self : List[str]):
        '''tearDown: drop all four models and release CUDA memory.'''
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : Union[str, Any]):
        '''MLP weights are quantized (Params4bit) but heads remain nn.Parameter.'''
        from bitsandbytes.nn import Paramsabit
        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class _lowerCAmelCase ( lowercase_ ):
    """4-bit loading through the `pipeline()` API.

    NOTE(review): machine-mangled — `self.pipe` / `pipeline_output` bindings
    were rewritten to `snake_case__` and are never bound.
    """
    def __magic_name__ ( self : Tuple):
        '''setUp: defer to the base fixture.'''
        super().setUp()
    def __magic_name__ ( self : int):
        '''tearDown: drop the pipeline and release CUDA memory.'''
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : Tuple):
        '''A text-generation pipeline built with load_in_4bit produces an expected output.'''
        snake_case__ = pipeline(
            """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        snake_case__ = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _lowerCAmelCase ( lowercase_ ):
    """4-bit loading with a balanced multi-GPU device map.

    NOTE(review): machine-mangled — `model_parallel`, `encoded_input` and
    `output_parallel` bindings were rewritten to `snake_case__`.
    """
    def __magic_name__ ( self : Union[str, Any]):
        '''setUp: defer to the base fixture.'''
        super().setUp()
    def __magic_name__ ( self : int):
        '''Model shards across both GPUs and still generates an expected output.'''
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase__ , device_map="""balanced""")
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
        # Check that inference pass works on the model
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        # Second real batch
        snake_case__ = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
class _lowerCAmelCase ( lowercase_ ):
    """Adapter (LoRA-style) training on top of a frozen 4-bit OPT model.

    NOTE(review): machine-mangled — the model binding, the per-module
    `q_proj`/`k_proj`/`v_proj` adapter assignments and `batch`/`out` were all
    rewritten to `snake_case__`, so the adapter installation is lost as written.
    """
    def __magic_name__ ( self : Any):
        '''setUp: switch the checkpoint to facebook/opt-350m.'''
        snake_case__ = """facebook/opt-350m"""
        super().setUp()
    def __magic_name__ ( self : Any):
        '''Adapters attached to a frozen 4-bit model receive gradients; embeddings do not.'''
        if version.parse(importlib.metadata.version("""bitsandbytes""")) < version.parse("""0.37.0"""):
            return
        # Step 1: freeze all parameters
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__)
        self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
        for param in model.parameters():
            snake_case__ = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                snake_case__ = param.data.to(torch.floataa)
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(UpperCamelCase__)):
                snake_case__ = LoRALayer(module.q_proj , rank=1_6)
                snake_case__ = LoRALayer(module.k_proj , rank=1_6)
                snake_case__ = LoRALayer(module.v_proj , rank=1_6)
        # Step 3: dummy batch
        snake_case__ = self.tokenizer("""Test batch """ , return_tensors="""pt""").to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            snake_case__ = model.forward(**UpperCamelCase__)
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(UpperCamelCase__ , UpperCamelCase__):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(UpperCamelCase__ , nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class _lowerCAmelCase ( lowercase_ ):
    """Re-runs the 4-bit suite against gpt2-xl with its own footprint ratio.

    NOTE(review): machine-mangled — both attributes were renamed to
    `_lowercase` (originally model_name and EXPECTED_RELATIVE_DIFFERENCE),
    so the second assignment clobbers the first.
    """
    # checkpoint under test
    _lowercase : List[Any] = '''gpt2-xl'''
    # expected fp16/4-bit memory-footprint ratio for gpt2-xl
    _lowercase : Any = 3.3191_8548_5415_2187
| 654 | 1 |
class Node:
    """A node of a doubly linked list, holding a value plus previous/next links.

    Reconstructed from mangled source: duplicate `UpperCamelCase__` parameters
    were a SyntaxError, the `self.*` bindings were lost, and the three getters
    had collapsed to one name. Names are restored from the call sites in the
    list/iterator classes below (`get_data`, `get_next`, `get_previous`, and
    direct `node.previous`/`node.next` attribute access).
    """

    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"""{self.data}"""

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    """Forward iterator over a chain of nodes exposing get_data()/get_next().

    Reconstructed from mangled source: the `self.current` binding was lost in
    __init__ and the advance method had lost its `__next__` name, which the
    iterator protocol requires (the class is handed out by LinkedList.__iter__).
    """

    def __init__(self, head):
        # Start at the head node; None means iteration is already exhausted.
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        # Yield the current node's value, then advance the cursor.
        value = self.current.get_data()
        self.current = self.current.get_next()
        return value
class LinkedList:
    """A doubly linked list built on Node, iterable via LinkedListIterator.

    Reconstructed from mangled source: every method name had collapsed to
    `__magic_name__`, several signatures had duplicate parameter names
    (SyntaxError), and all `self.*`/pointer writes were lost. Method names are
    restored from the internal call sites that survived
    (set_head/set_tail/insert_before_node/insert_after_node/get_node/
    remove_node_pointers); the remaining names follow the same convention —
    confirm against the upstream module.
    """

    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        """Return the first value, or None if the list is empty."""
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        """Return the last value, or None if the list is empty."""
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node) -> None:
        """Make *node* the new head (prepend)."""
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node) -> None:
        """Make *node* the new tail (append)."""
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        """Append *value* as a new node (becomes head when list is empty)."""
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert) -> None:
        """Splice *node_to_insert* immediately before *node*."""
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            # inserting before the head: update the head pointer
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert) -> None:
        """Splice *node_to_insert* immediately after *node*."""
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            # inserting after the tail: update the tail pointer
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        """Insert *value* at 1-based *position*; append when position is past the end."""
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int):
        """Return the first node whose data equals *item*; raise if absent."""
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("""Node not found""")

    def delete_value(self, value) -> None:
        """Remove the first node holding *value*, fixing head/tail as needed."""
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node) -> None:
        """Detach *node* from its neighbours and clear its own links."""
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None

    def is_empty(self):
        return self.head is None
def _UpperCAmelCase ( ):
    """Placeholder demo function.

    NOTE(review): the original name and doctest body were stripped by the
    mangling; only this empty stub remains.
    """
    pass
# Run the module doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 654 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# NOTE(review): machine-mangled constants — all four were renamed to `a__`,
# so each assignment clobbers the previous one and the script below reads
# names that are never bound. Presumably these were the label directory,
# image directory, output directory and flip type — confirm against upstream.
a__ = """"""
a__ = """"""
a__ = """"""
a__ = 1  # (0 is vertical, 1 is horizontal)
def _UpperCAmelCase ( ):
    """Flip-augment a YOLO-style dataset and write new images + label files.

    NOTE(review): machine-mangled — the def was originally named `main`
    (called in the __main__ guard below), the local bindings were rewritten to
    `snake_case__` (so `paths`, `new_annos`, `file_name`, `file_root`,
    `letter_code`, `annos_list` are never bound), and the `get_dataset` /
    `update_image_and_anno` arguments were replaced by the undefined `a`.
    """
    snake_case__ , snake_case__ = get_dataset(a , a )
    print("""Processing...""" )
    snake_case__ , snake_case__ , snake_case__ = update_image_and_anno(a , a , a )
    for index, image in enumerate(a ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        snake_case__ = random_chars(32 )
        snake_case__ = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        snake_case__ = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        # NOTE(review): the leading "/" in the output paths looks mangled too.
        cva.imwrite(F'''/{file_root}.jpg''' , a , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F'''Success {index+1}/{len(a )} with {file_name}''' )
        snake_case__ = []
        for anno in new_annos[index]:
            snake_case__ = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(a )
        with open(F'''/{file_root}.txt''' , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def get_dataset(label_dir: str, img_dir: str):
    """Collect YOLO-style annotations and matching image paths.

    Reads every ``*.txt`` label file in *label_dir*; each line is
    ``class x_center y_center width height``. Files with no boxes are skipped.
    The def name is restored from the `get_dataset(...)` call in main(); the
    mangled source declared two parameters both named `a` (a SyntaxError).

    Returns:
        (img_paths, labels): parallel lists — image path ``<img_dir>/<name>.jpg``
        and the list of ``[cls, x, y, w, h]`` boxes for that image.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, """*.txt""" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, F'''{label_name}.jpg''' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("""\n""" ).split(""" """ )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    """Flip every image and mirror its bounding boxes.

    flip_type: 1 = horizontal (x centers mirrored), 0 = vertical (y centers
    mirrored) — matching cv2.flip's flip codes. The def name is restored from
    the call in main(); the mangled source declared three parameters all
    named `a` (a SyntaxError) while the body already read the real names.

    Returns:
        (new_imgs_list, new_annos_lists, path_list): flipped images, their
        adjusted ``[cls, x, y, w, h]`` boxes, and the source image paths.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        img_path = img_list[idx]
        path_list.append(img_path )
        img_annos = anno_list[idx]
        # NOTE(review): `cva` is itself a mangled import — upstream uses cv2.
        img = cva.imread(img_path )
        if flip_type == 1:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    """Return a random string of *number_char* lowercase letters and digits.

    The def name is restored from the `random_chars(32)` call in main(); the
    mangled source named the parameter `a` while the assert read the
    undefined `number_char`.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
# Script entry point.
# NOTE(review): `main` is unresolved as written — the def above was mangled
# to `_UpperCAmelCase`; restore its original name.
if __name__ == "__main__":
    main()
    print("""DONE ✅""")
| 654 | 1 |
a__ = 0 # The first color of the flag.
a__ = 1 # The second color of the flag.
a__ = 2 # The third color of the flag.
a__ = (red, white, blue)
def _UpperCAmelCase ( a : list ):
if not sequence:
return []
if len(a ) == 1:
return list(a )
snake_case__ = 0
snake_case__ = len(a ) - 1
snake_case__ = 0
while mid <= high:
if sequence[mid] == colors[0]:
snake_case__ , snake_case__ = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
snake_case__ , snake_case__ = sequence[high], sequence[mid]
high -= 1
else:
snake_case__ = F'''The elements inside the sequence must contains only {colors} values'''
raise ValueError(a )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ = input("""Enter numbers separated by commas:\n""").strip()
a__ = [int(item.strip()) for item in user_input.split(""",""")]
print(F'''{dutch_national_flag_sort(unsorted)}''')
| 654 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# NOTE(review): machine-mangled constants — all targets were renamed to
# `a__`, so SPEED_TEST_N_EXAMPLES / RESULTS_BASEPATH / RESULTS_FILENAME read
# below are never bound; the third line raises NameError as written.
a__ = 5_0_0_0_0_0
a__ , a__ = os.path.split(__file__)
a__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):  # noqa: A001 - intentionally shadows the builtin, matching the call sites below
    """Benchmark helper: run Dataset.map with *kwargs*; get_duration returns the elapsed time.

    The mangled source declared both parameters as `a` (a SyntaxError); the
    def name is restored from the `map(...)` benchmark calls below.
    """
    _ = dataset.map(**kwargs)
@get_duration
def filter(dataset: datasets.Dataset, **kwargs):  # noqa: A001 - intentionally shadows the builtin, matching the call site below
    """Benchmark helper: run Dataset.filter with *kwargs*; get_duration returns the elapsed time.

    The mangled source declared both parameters as `a` (a SyntaxError); the
    def name is restored from the `filter(...)` benchmark call below.
    """
    _ = dataset.filter(**kwargs)
def _UpperCAmelCase ( ):
    """Benchmark Dataset.map/filter across formats and dump timings to JSON.

    NOTE(review): machine-mangled — the def was originally named
    `benchmark_map_filter` (called in the __main__ guard below); the local
    bindings (`times`, `features`, `dataset`, `tokenizer`) were rewritten to
    `snake_case__` and most call arguments were replaced by the undefined `a`.
    """
    snake_case__ = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        snake_case__ = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        snake_case__ = generate_example_dataset(
            os.path.join(a , """dataset.arrow""" ) , a , num_examples=a )
        snake_case__ = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=a )
        def tokenize(a : Union[str, Any] ):
            # NOTE(review): reads the undefined `examples` — originally the parameter name.
            return tokenizer(examples["""text"""] )
        snake_case__ = map(a )
        snake_case__ = map(a , batched=a )
        snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""numpy""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""pandas""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        snake_case__ = map(a , function=a , batched=a )
        snake_case__ = filter(a )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(a , """wb""" ) as f:
            f.write(json.dumps(a ).encode("""utf-8""" ) )
# NOTE(review): `benchmark_map_filter` is unresolved as written — the def
# above was mangled to `_UpperCAmelCase`; restore its original name.
if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
| 654 | 1 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
a__ = """hf-internal-testing/tiny-random-bert"""
a__ = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
a__ = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self : Tuple):
'''simple docstring'''
snake_case__ = cached_file(UpperCamelCase__ , UpperCamelCase__)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(UpperCamelCase__))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCamelCase__ , UpperCamelCase__)))
with open(os.path.join(UpperCamelCase__ , """refs""" , """main""")) as f:
snake_case__ = f.read()
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , """snapshots""" , UpperCamelCase__ , UpperCamelCase__))
self.assertTrue(os.path.isfile(UpperCamelCase__))
# File is cached at the same place the second time.
snake_case__ = cached_file(UpperCamelCase__ , UpperCamelCase__)
self.assertEqual(UpperCamelCase__ , UpperCamelCase__)
# Using a specific revision to test the full commit hash.
snake_case__ = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision="""9b8c223""")
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , """snapshots""" , UpperCamelCase__ , UpperCamelCase__))
def __magic_name__ ( self : str):
'''simple docstring'''
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid model identifier"""):
snake_case__ = cached_file("""tiny-random-bert""" , UpperCamelCase__)
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid git identifier"""):
snake_case__ = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision="""aaaa""")
with self.assertRaisesRegex(UpperCamelCase__ , """does not appear to have a file named"""):
snake_case__ = cached_file(UpperCamelCase__ , """conf""")
def __magic_name__ ( self : Union[str, Any]):
'''simple docstring'''
with self.assertRaisesRegex(UpperCamelCase__ , """does not appear to have a file named"""):
snake_case__ = cached_file(UpperCamelCase__ , """conf""")
with open(os.path.join(UpperCamelCase__ , """refs""" , """main""")) as f:
snake_case__ = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , """.no_exist""" , UpperCamelCase__ , """conf""")))
snake_case__ = cached_file(UpperCamelCase__ , """conf""" , _raise_exceptions_for_missing_entries=UpperCamelCase__)
self.assertIsNone(UpperCamelCase__)
snake_case__ = cached_file(UpperCamelCase__ , """conf""" , local_files_only=UpperCamelCase__ , _raise_exceptions_for_missing_entries=UpperCamelCase__)
self.assertIsNone(UpperCamelCase__)
snake_case__ = mock.Mock()
snake_case__ = 5_0_0
snake_case__ = {}
snake_case__ = HTTPError
snake_case__ = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=UpperCamelCase__) as mock_head:
snake_case__ = cached_file(UpperCamelCase__ , """conf""" , _raise_exceptions_for_connection_errors=UpperCamelCase__)
self.assertIsNone(UpperCamelCase__)
# This check we did call the fake head request
mock_head.assert_called()
def __magic_name__ ( self : Any):
'''simple docstring'''
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase__))
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase__))
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase__))
def __magic_name__ ( self : List[str]):
'''simple docstring'''
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt"""))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid model identifier"""):
get_file_from_repo("""bert-base-case""" , UpperCamelCase__)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid git identifier"""):
get_file_from_repo("""bert-base-cased""" , UpperCamelCase__ , revision="""ahaha""")
snake_case__ = get_file_from_repo("""bert-base-cased""" , UpperCamelCase__)
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case__ = json.loads(open(UpperCamelCase__ , """r""").read())
self.assertEqual(config["""hidden_size"""] , 7_6_8)
def __magic_name__ ( self : int):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ = Path(UpperCamelCase__) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(UpperCamelCase__ , """a.txt""") , str(UpperCamelCase__))
self.assertIsNone(get_file_from_repo(UpperCamelCase__ , """b.txt"""))
| 654 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def _UpperCAmelCase ( a : List[str] , a : Any=False ):
snake_case__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def _UpperCAmelCase ( a : int , a : List[Any] , a : Union[str, Any]=False ):
    """Split each timm fused qkv projection into separate query/key/value slices.

    NOTE(review): all three parameters are named `a` (duplicate argument names are
    a SyntaxError in Python) and every assignment below rebinds the same local
    `snake_case__`, while the body reads descriptive names (`config`,
    `state_dict`, `in_proj_weight`, ...). Upstream each slice was written back
    into `state_dict` under per-layer query/key/value keys — confirm against the
    original conversion script before relying on this.
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            # no "deit." prefix for a bare base model
            snake_case__ = """"""
        else:
            snake_case__ = """deit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        snake_case__ = in_proj_weight[
            : config.hidden_size, :
        ]
        snake_case__ = in_proj_bias[: config.hidden_size]
        snake_case__ = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        snake_case__ = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        snake_case__ = in_proj_weight[
            -config.hidden_size :, :
        ]
        snake_case__ = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( a : Dict , a : Union[str, Any] , a : int ):
    """Rename helper: pop a key from a dict and re-insert its value under a new key.

    NOTE(review): parameters are all named `a` (a SyntaxError) and both statements
    bind the same local; upstream this did `val = dct.pop(old); dct[new] = val`.
    """
    snake_case__ = dct.pop(a )
    snake_case__ = val
def _UpperCAmelCase ( ):
    """Fetch the standard COCO cats image used to sanity-check converted models."""
    snake_case__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # NOTE(review): `a` is not defined in this scope and `im` is never bound by
    # the visible assignments — upstream this fetched the URL above with
    # `requests.get(url, stream=True)` and returned the opened PIL image.
    snake_case__ = Image.open(requests.get(a , stream=a ).raw )
    return im
@torch.no_grad()
def _UpperCAmelCase ( a : List[str] , a : Tuple ):
    """Convert a timm DeiT checkpoint into a HF DeiTForImageClassificationWithTeacher.

    Steps: build the config from the model name, load and rename the timm
    state dict, verify logits match on a test image, then save model and
    image processor to the dump folder.

    NOTE(review): both parameters are named `a` (duplicate argument names are a
    SyntaxError) and most locals rebind `snake_case__` while the body reads
    descriptive names (`config`, `deit_name`, `timm_model`, ...); upstream the
    two arguments are `deit_name` and `pytorch_dump_folder_path`.
    """
    snake_case__ = DeiTConfig()
    # all deit models have fine-tuned heads
    snake_case__ = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    snake_case__ = 1000
    snake_case__ = """huggingface/label-files"""
    snake_case__ = """imagenet-1k-id2label.json"""
    snake_case__ = json.load(open(hf_hub_download(a , a , repo_type="""dataset""" ) , """r""" ) )
    snake_case__ = {int(a ): v for k, v in idalabel.items()}
    snake_case__ = idalabel
    snake_case__ = {v: k for k, v in idalabel.items()}
    # patch_size and image_size are encoded in the checkpoint name suffix
    snake_case__ = int(deit_name[-6:-4] )
    snake_case__ = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("""tiny""" ):
        snake_case__ = 192
        snake_case__ = 768
        snake_case__ = 12
        snake_case__ = 3
    elif deit_name[9:].startswith("""small""" ):
        snake_case__ = 384
        snake_case__ = 1536
        snake_case__ = 12
        snake_case__ = 6
    if deit_name[9:].startswith("""base""" ):
        # base uses the DeiTConfig defaults
        pass
    elif deit_name[4:].startswith("""large""" ):
        snake_case__ = 1024
        snake_case__ = 4096
        snake_case__ = 24
        snake_case__ = 16
    # load original model from timm
    snake_case__ = timm.create_model(a , pretrained=a )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    snake_case__ = timm_model.state_dict()
    snake_case__ = create_rename_keys(a , a )
    for src, dest in rename_keys:
        rename_key(a , a , a )
    read_in_q_k_v(a , a , a )
    # load HuggingFace model
    snake_case__ = DeiTForImageClassificationWithTeacher(a ).eval()
    model.load_state_dict(a )
    # Check outputs on an image, prepared by DeiTImageProcessor
    snake_case__ = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    snake_case__ = DeiTImageProcessor(size=a , crop_size=config.image_size )
    snake_case__ = image_processor(images=prepare_img() , return_tensors="""pt""" )
    snake_case__ = encoding["""pixel_values"""]
    snake_case__ = model(a )
    snake_case__ = timm_model(a )
    # converted model must reproduce the timm logits
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(a , outputs.logits , atol=1e-3 )
    Path(a ).mkdir(exist_ok=a )
    print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(a )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(a )
# CLI entry point: parse the timm checkpoint name and output folder, then convert.
if __name__ == "__main__":
    a__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--deit_name""",
        default="""vit_deit_base_distilled_patch16_224""",
        type=str,
        help="""Name of the DeiT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    a__ = parser.parse_args()
    # NOTE(review): the parser/args are bound as `a__` but read as `parser`/`args`,
    # and the conversion entry point above is named `_UpperCAmelCase`, not
    # `convert_deit_checkpoint` — confirm the intended names upstream.
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 654 | 1 |
import qiskit
def _UpperCAmelCase ( qubits : int , classical_bits : int ):
    """Build a measurement-only circuit, run it on the Aer simulator, return counts.

    Args:
        qubits: number of quantum registers in the circuit.
        classical_bits: number of classical registers to measure into.

    Returns:
        dict mapping measured bit strings to observed counts over 1000 shots.
    """
    # Fix: the original declared two parameters both named `a` (a SyntaxError)
    # and bound every intermediate to the same local while reading `circuit`
    # and `job`. Callers pass both arguments positionally, so renaming the
    # parameters is safe.
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    # NOTE(review): the measurement function above is defined as `_UpperCAmelCase`,
    # not `single_qubit_measure` — confirm the intended public name upstream.
    print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 654 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( lowercase_ ):
    """Output container for the prior transformer below.

    Holds the predicted CLIP image embedding produced by the forward pass.
    """

    # Predicted image embedding tensor — presumably (batch_size, embedding_dim);
    # confirm against the consuming pipeline.
    _lowercase : torch.FloatTensor
class _lowerCAmelCase ( lowercase_ , lowercase_ ):
    """Prior transformer: predicts a CLIP image embedding from text conditioning.

    The token sequence fed to the transformer stack is, in order:
    [optional encoder hidden states] + projected embedding + timestep embedding
    + hidden states [+ optional learned "prd" token], attended under a causal
    mask. NOTE(review): parameters are obfuscated to `UpperCamelCase__` and most
    locals rebind `snake_case__` while bodies read descriptive names — the
    comments below describe the apparent upstream intent.
    """

    @register_to_config
    def __init__( self : Tuple , UpperCamelCase__ : int = 3_2 , UpperCamelCase__ : int = 6_4 , UpperCamelCase__ : int = 2_0 , UpperCamelCase__ : int = 7_6_8 , UpperCamelCase__ : Optional[Any]=7_7 , UpperCamelCase__ : str=4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = "linear" , UpperCamelCase__ : Optional[str] = "prd" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , ):
        """Build the time/embedding projections, positional + prd embeddings,
        the BasicTransformerBlock stack, output norm/projection, the causal
        attention mask buffer, and the clip mean/std parameters."""
        super().__init__()
        snake_case__ = num_attention_heads
        snake_case__ = attention_head_dim
        snake_case__ = num_attention_heads * attention_head_dim
        snake_case__ = additional_embeddings
        snake_case__ = time_embed_dim or inner_dim
        snake_case__ = embedding_proj_dim or embedding_dim
        snake_case__ = clip_embed_dim or embedding_dim
        # timestep projection + embedding MLP
        snake_case__ = Timesteps(UpperCamelCase__ , UpperCamelCase__ , 0)
        snake_case__ = TimestepEmbedding(UpperCamelCase__ , UpperCamelCase__ , out_dim=UpperCamelCase__ , act_fn=UpperCamelCase__)
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        # optional layer norm in front of the embedding projection
        if embedding_proj_norm_type is None:
            snake_case__ = None
        elif embedding_proj_norm_type == "layer":
            snake_case__ = nn.LayerNorm(UpperCamelCase__)
        else:
            raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''')
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        # optional projection for the encoder hidden states
        if encoder_hid_proj_type is None:
            snake_case__ = None
        elif encoder_hid_proj_type == "linear":
            snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        else:
            raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''')
        # learned positional embedding covering base + additional tokens
        snake_case__ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase__))
        # optional learned "prd" token appended at the end of the sequence
        if added_emb_type == "prd":
            snake_case__ = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase__))
        elif added_emb_type is None:
            snake_case__ = None
        else:
            raise ValueError(
                F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''')
        snake_case__ = nn.ModuleList(
            [
                BasicTransformerBlock(
                    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn="""gelu""" , attention_bias=UpperCamelCase__ , )
                for d in range(UpperCamelCase__)
            ])
        # optional layer norm before the transformer stack
        if norm_in_type == "layer":
            snake_case__ = nn.LayerNorm(UpperCamelCase__)
        elif norm_in_type is None:
            snake_case__ = None
        else:
            raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''')
        snake_case__ = nn.LayerNorm(UpperCamelCase__)
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        # strictly-upper-triangular -10000 mask => causal (no attending to future tokens)
        snake_case__ = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0)
        causal_attention_mask.triu_(1)
        snake_case__ = causal_attention_mask[None, ...]
        self.register_buffer("""causal_attention_mask""" , UpperCamelCase__ , persistent=UpperCamelCase__)
        # statistics used by post_process_latents to un-normalise CLIP latents
        snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
        snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def __magic_name__ ( self : Optional[int]):
        """Return a dict of all attention processors, keyed by their module path."""
        snake_case__ = {}

        def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor]):
            # any module exposing `set_processor` contributes its processor
            if hasattr(UpperCamelCase__ , """set_processor"""):
                snake_case__ = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        return processors

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Set the attention processor(s); a dict must supply one per attention layer."""
        snake_case__ = len(self.attn_processors.keys())
        if isinstance(UpperCamelCase__ , UpperCamelCase__) and len(UpperCamelCase__) != count:
            raise ValueError(
                F'''A dict of processors was passed, but the number of processors {len(UpperCamelCase__)} does not match the'''
                F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''')

        def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Optional[int]):
            if hasattr(UpperCamelCase__ , """set_processor"""):
                # a single processor is shared; a dict is consumed per-module by path
                if not isinstance(UpperCamelCase__ , UpperCamelCase__):
                    module.set_processor(UpperCamelCase__)
                else:
                    module.set_processor(processor.pop(F'''{name}.processor'''))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)

        for name, module in self.named_children():
            fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)

    def __magic_name__ ( self : Dict):
        """Reset every attention layer to the default AttnProcessor."""
        self.set_attn_processor(AttnProcessor())

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[torch.Tensor, float, int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.BoolTensor] = None , UpperCamelCase__ : bool = True , ):
        """Forward pass: assemble the token sequence, run the transformer under the
        causal mask, and project the final token(s) to a CLIP image embedding.
        Returns a tuple when `return_dict` is False."""
        snake_case__ = hidden_states.shape[0]
        snake_case__ = timestep
        # normalise timestep to a 1-D long tensor on the right device
        if not torch.is_tensor(UpperCamelCase__):
            snake_case__ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device)
        elif torch.is_tensor(UpperCamelCase__) and len(timesteps.shape) == 0:
            snake_case__ = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        snake_case__ = timesteps * torch.ones(UpperCamelCase__ , dtype=timesteps.dtype , device=timesteps.device)
        snake_case__ = self.time_proj(UpperCamelCase__)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        snake_case__ = timesteps_projected.to(dtype=self.dtype)
        snake_case__ = self.time_embedding(UpperCamelCase__)
        if self.embedding_proj_norm is not None:
            snake_case__ = self.embedding_proj_norm(UpperCamelCase__)
        snake_case__ = self.embedding_proj(UpperCamelCase__)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            snake_case__ = self.encoder_hidden_states_proj(UpperCamelCase__)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""")
        snake_case__ = self.proj_in(UpperCamelCase__)
        snake_case__ = self.positional_embedding.to(hidden_states.dtype)
        snake_case__ = []
        snake_case__ = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(UpperCamelCase__)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        # promote 2-D inputs to a singleton sequence dimension
        if len(proj_embeddings.shape) == 2:
            snake_case__ = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            snake_case__ = hidden_states[:, None, :]
        snake_case__ = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            snake_case__ = self.prd_embedding.to(hidden_states.dtype).expand(UpperCamelCase__ , -1 , -1)
            additional_embeds.append(UpperCamelCase__)
        snake_case__ = torch.cat(
            UpperCamelCase__ , dim=1 , )
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        snake_case__ = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            snake_case__ = F.pad(
                UpperCamelCase__ , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        snake_case__ = hidden_states + positional_embeddings
        if attention_mask is not None:
            # convert boolean mask to additive -10000 mask, pad for added tokens,
            # combine with the causal mask and expand over attention heads
            snake_case__ = (1 - attention_mask.to(hidden_states.dtype)) * -1_00_00.0
            snake_case__ = F.pad(UpperCamelCase__ , (0, self.additional_embeddings) , value=0.0)
            snake_case__ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            snake_case__ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0)
        if self.norm_in is not None:
            snake_case__ = self.norm_in(UpperCamelCase__)
        for block in self.transformer_blocks:
            snake_case__ = block(UpperCamelCase__ , attention_mask=UpperCamelCase__)
        snake_case__ = self.norm_out(UpperCamelCase__)
        # with a prd token, its final state carries the prediction; otherwise use
        # everything after the additional embeddings
        if self.prd_embedding is not None:
            snake_case__ = hidden_states[:, -1]
        else:
            snake_case__ = hidden_states[:, additional_embeddings_len:]
        snake_case__ = self.proj_to_clip_embeddings(UpperCamelCase__)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase__)

    def __magic_name__ ( self : Any , UpperCamelCase__ : Any):
        """Un-normalise prior latents back to the CLIP embedding scale."""
        snake_case__ = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 654 | 1 |
import re
from filelock import FileLock
# Optional nltk dependency: record availability and pre-download the sentence
# tokenizer once, under a file lock so concurrent processes don't race.
try:
    import nltk

    a__ = True
except (ImportError, ModuleNotFoundError):
    a__ = False

# NOTE(review): the availability flag is bound as `a__` but tested as
# NLTK_AVAILABLE — confirm the upstream name.
if NLTK_AVAILABLE:
    with FileLock(""".lock""") as lock:
        nltk.download("""punkt""", quiet=True)
def _UpperCAmelCase ( a : str ):
    """Split *a* into sentences with NLTK and join them with newlines.

    Added to get rougeLsum scores matching published rougeL scores for
    BART and PEGASUS summarization outputs.

    Args:
        a: the text to re-split, possibly containing pegasus "<n>" markers.

    Returns:
        The text with one sentence per line.

    Raises:
        AssertionError: if nltk is not installed.
    """
    # Fix: the original discarded the result of re.sub (strings are immutable),
    # so the pegasus "<n>" newline marker was never actually removed.
    a = re.sub("""<n>""" , """""" , a )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(a ) )
| 654 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
a__ = ["""gpt2"""]
a__ = """gpt2"""
if is_tf_available():
    class _lowerCAmelCase ( tf.Module ):
        """tf.Module wrapping a GPT-2 LM head model behind an in-graph tokenizer,
        so the whole tokenize + forward pipeline can be saved as a SavedModel."""

        def __init__( self : List[Any] , UpperCamelCase__ : int):
            """Store the tokenizer and build an untrained TFGPT2LMHeadModel from config.

            NOTE(review): the parameter is obfuscated to `UpperCamelCase__`;
            upstream it is the tokenizer (also used as the checkpoint name).
            """
            super().__init__()
            snake_case__ = tokenizer
            snake_case__ = AutoConfig.from_pretrained(UpperCamelCase__)
            snake_case__ = TFGPTaLMHeadModel.from_config(UpperCamelCase__)

        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text"""),))
        def __magic_name__ ( self : Tuple , UpperCamelCase__ : int):
            """Serving signature: tokenize raw strings, build an attention mask
            from the non-padding positions, and return the LM logits."""
            snake_case__ = self.tokenizer(UpperCamelCase__)
            snake_case__ = tokenized["""input_ids"""].to_tensor()
            snake_case__ = tf.cast(input_ids_dense > 0 , tf.intaa)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            snake_case__ = self.model(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__)["""logits"""]
            return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests that the in-graph TFGPT2Tokenizer matches the Python GPT2Tokenizer
    and survives tf.function compilation, SavedModel round-trips, config
    round-trips and max_length padding."""

    def __magic_name__ ( self : List[Any]):
        """Load paired Python/TF tokenizers and a set of multilingual test sentences."""
        super().setUp()
        snake_case__ = [GPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        snake_case__ = [TFGPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        snake_case__ = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        snake_case__ = list(zip(self.test_sentences , self.test_sentences[::-1]))

    def __magic_name__ ( self : Optional[int]):
        """TF tokenizer output must equal the Python tokenizer output per key."""
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                snake_case__ = tokenizer([test_inputs] , return_tensors="""tf""")
                snake_case__ = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    snake_case__ = python_outputs[key].numpy()
                    snake_case__ = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase__ , tf.intaa) == tf_outputs_values))

    @slow
    def __magic_name__ ( self : Optional[int]):
        """Eager and tf.function-compiled tokenization must agree."""
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = tf.function(UpperCamelCase__)
            for test_inputs in self.test_sentences:
                snake_case__ = tf.constant(UpperCamelCase__)
                snake_case__ = compiled_tokenizer(UpperCamelCase__)
                snake_case__ = tf_tokenizer(UpperCamelCase__)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def __magic_name__ ( self : Optional[Any]):
        """Model + tokenizer must survive a SavedModel save/load round-trip."""
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = ModelToSave(tokenizer=UpperCamelCase__)
            snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
            snake_case__ = model.serving(UpperCamelCase__)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                snake_case__ = Path(UpperCamelCase__) / """saved.model"""
                tf.saved_model.save(UpperCamelCase__ , UpperCamelCase__ , signatures={"""serving_default""": model.serving})
                snake_case__ = tf.saved_model.load(UpperCamelCase__)
                snake_case__ = loaded_model.signatures["""serving_default"""](UpperCamelCase__)["""output_0"""]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def __magic_name__ ( self : Tuple):
        """Tokenizer must survive a get_config / from_config round-trip."""
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
            snake_case__ = tf_tokenizer(UpperCamelCase__)  # Build model with some sample inputs
            snake_case__ = tf_tokenizer.get_config()
            snake_case__ = TFGPTaTokenizer.from_config(UpperCamelCase__)
            snake_case__ = model_from_config(UpperCamelCase__)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def __magic_name__ ( self : Dict):
        """max_length must cap the produced input_ids sequence length."""
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            snake_case__ = 1_2_3_1_2_3
            for max_length in [3, 5, 1_0_2_4]:
                snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
                snake_case__ = tf_tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__)
                snake_case__ = out["""input_ids"""].numpy().shape[1]
                assert out_length == max_length
| 654 | 1 |
def _UpperCAmelCase ( a : Optional[Any] ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def _UpperCAmelCase ( a : dict[int, list[int]] ):
snake_case__ = 0
snake_case__ = len(a ) # No of vertices in graph
snake_case__ = [0] * n
snake_case__ = [False] * n
def dfs(a : List[str] , a : Any , a : List[str] , a : Union[str, Any] ):
snake_case__ = True
snake_case__ = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(a , a , a , id_ )
snake_case__ = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
snake_case__ = min(low[at] , low[to] )
snake_case__ = []
for i in range(a ):
if not visited[i]:
dfs(a , -1 , a , id_ )
return bridges
# Run any doctests embedded in this module when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 654 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( lowercase_ ):
    """Scheduler test suite for IPNDMScheduler: config/forward save-load
    round-trips, a full denoising loop, and step-output shape checks.
    NOTE(review): locals are obfuscated to `snake_case__` while bodies read
    descriptive names (`scheduler`, `sample`, ...)."""

    # scheduler class(es) under test and default forward kwargs
    _lowercase : int = (IPNDMScheduler,)
    _lowercase : int = (('''num_inference_steps''', 50),)

    def __magic_name__ ( self : Any , **UpperCamelCase__ : Tuple):
        """Return the default scheduler config, with overrides applied."""
        snake_case__ = {"""num_train_timesteps""": 1_0_0_0}
        config.update(**UpperCamelCase__)
        return config

    def __magic_name__ ( self : int , UpperCamelCase__ : Dict=0 , **UpperCamelCase__ : int):
        """Saving and reloading the scheduler config must not change step outputs."""
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        # fabricated history of residuals fed to the multistep scheduler
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
            snake_case__ = scheduler_class(**UpperCamelCase__)
            scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residuals
            snake_case__ = dummy_past_residuals[:]
            if time_step is None:
                snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__)
                snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
                new_scheduler.set_timesteps(UpperCamelCase__)
                # copy over dummy past residuals
                snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def __magic_name__ ( self : List[Any]):
        """Intentionally overridden as a no-op for this scheduler."""
        pass

    def __magic_name__ ( self : Tuple , UpperCamelCase__ : Union[str, Any]=0 , **UpperCamelCase__ : Tuple):
        """Same round-trip check as above, but with the default config."""
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**UpperCamelCase__)
            scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residuals (must be after setting timesteps)
            snake_case__ = dummy_past_residuals[:]
            if time_step is None:
                snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__)
                snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(UpperCamelCase__)
                # copy over dummy past residual (must be after setting timesteps)
                snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def __magic_name__ ( self : Union[str, Any] , **UpperCamelCase__ : Dict):
        """Run a full 10-step denoising loop (twice over the timesteps) and
        return the final sample."""
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
        snake_case__ = scheduler_class(**UpperCamelCase__)
        snake_case__ = 1_0
        snake_case__ = self.dummy_model()
        snake_case__ = self.dummy_sample_deter
        scheduler.set_timesteps(UpperCamelCase__)
        for i, t in enumerate(scheduler.timesteps):
            snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
        return sample

    def __magic_name__ ( self : Optional[int]):
        """Successive step() calls must preserve the sample's shape."""
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**UpperCamelCase__)
            snake_case__ = self.dummy_sample
            snake_case__ = 0.1 * sample
            if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps"""):
                scheduler.set_timesteps(UpperCamelCase__)
            elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps"""):
                snake_case__ = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.timesteps[5]
            snake_case__ = scheduler.timesteps[6]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            self.assertEqual(output_a.shape , sample.shape)
            self.assertEqual(output_a.shape , output_a.shape)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            self.assertEqual(output_a.shape , sample.shape)
            self.assertEqual(output_a.shape , output_a.shape)

    def __magic_name__ ( self : Union[str, Any]):
        """Round-trip check across several num_train_timesteps values."""
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__)

    def __magic_name__ ( self : Dict):
        """Round-trip check across several (time_step, num_inference_steps) pairs."""
        for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0]):
            self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__)

    def __magic_name__ ( self : List[str]):
        """The full loop must land near a known reference mean value."""
        snake_case__ = self.full_loop()
        snake_case__ = torch.mean(torch.abs(UpperCamelCase__))
        assert abs(result_mean.item() - 2_5_4_0_5_2_9) < 1_0
| 654 | 1 |
from manim import *
class _lowerCAmelCase ( lowercase_ ):
    """Manim scene animating big-model inference with disk/CPU/GPU offload:
    layer weights hop between CPU and GPU as the input traverses the model.
    NOTE(review): locals are obfuscated to `snake_case__` while the body reads
    descriptive names (`mem`, `cpu`, `gpu`, `model_arr`, ...)."""

    def __magic_name__ ( self : Optional[Any]):
        """Construct the scene: draw CPU/GPU/Model/Disk panels, then animate the
        input and the per-layer weight swaps."""
        # basic building-block rectangles used for memory cells
        snake_case__ = Rectangle(height=0.5 , width=0.5)
        snake_case__ = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
        snake_case__ = Rectangle(height=0.25 , width=0.25)
        # CPU panel: two columns of six cells
        snake_case__ = [mem.copy() for i in range(6)]
        snake_case__ = [mem.copy() for i in range(6)]
        snake_case__ = VGroup(*UpperCamelCase__).arrange(UpperCamelCase__ , buff=0)
        snake_case__ = VGroup(*UpperCamelCase__).arrange(UpperCamelCase__ , buff=0)
        snake_case__ = VGroup(UpperCamelCase__ , UpperCamelCase__).arrange(UpperCamelCase__ , buff=0)
        snake_case__ = Text("""CPU""" , font_size=2_4)
        snake_case__ = Group(UpperCamelCase__ , UpperCamelCase__).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(UpperCamelCase__)
        # GPU panel: four cells
        snake_case__ = [mem.copy() for i in range(4)]
        snake_case__ = VGroup(*UpperCamelCase__).arrange(UpperCamelCase__ , buff=0)
        snake_case__ = Text("""GPU""" , font_size=2_4)
        snake_case__ = Group(UpperCamelCase__ , UpperCamelCase__).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__)
        gpu.move_to([-1, -1, 0])
        self.add(UpperCamelCase__)
        # Model panel: six layer cells
        snake_case__ = [mem.copy() for i in range(6)]
        snake_case__ = VGroup(*UpperCamelCase__).arrange(UpperCamelCase__ , buff=0)
        snake_case__ = Text("""Model""" , font_size=2_4)
        snake_case__ = Group(UpperCamelCase__ , UpperCamelCase__).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__)
        model.move_to([3, -1.0, 0])
        self.add(UpperCamelCase__)
        # filled cells tracking where each layer's weights currently live
        snake_case__ = []
        snake_case__ = []
        for i, rect in enumerate(UpperCamelCase__):
            snake_case__ = fill.copy().set_fill(UpperCamelCase__ , opacity=0.8)
            target.move_to(UpperCamelCase__)
            model_arr.append(UpperCamelCase__)
            snake_case__ = Rectangle(height=0.46 , width=0.46).set_stroke(width=0.0).set_fill(UpperCamelCase__ , opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(UpperCamelCase__)
        self.add(*UpperCamelCase__ , *UpperCamelCase__)
        # Disk panel, drawn with the thin "meta" cells
        snake_case__ = [meta_mem.copy() for i in range(6)]
        snake_case__ = [meta_mem.copy() for i in range(6)]
        snake_case__ = VGroup(*UpperCamelCase__).arrange(UpperCamelCase__ , buff=0)
        snake_case__ = VGroup(*UpperCamelCase__).arrange(UpperCamelCase__ , buff=0)
        snake_case__ = VGroup(UpperCamelCase__ , UpperCamelCase__).arrange(UpperCamelCase__ , buff=0)
        snake_case__ = Text("""Disk""" , font_size=2_4)
        snake_case__ = Group(UpperCamelCase__ , UpperCamelCase__).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__)
        disk.move_to([-4, -1.25, 0])
        self.add(UpperCamelCase__ , UpperCamelCase__)
        # legend in the top-left corner
        snake_case__ = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        snake_case__ = MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0])
        self.add(UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = MarkupText(
            F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
        blue_text.next_to(UpperCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left())
        self.add(UpperCamelCase__)
        # narration step 1: introduce the animation
        snake_case__ = MarkupText(
            F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=2_4 , )
        step_a.move_to([2, 2, 0])
        self.play(Write(UpperCamelCase__))
        # the input token entering the model
        snake_case__ = Square(0.3)
        input.set_fill(UpperCamelCase__ , opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0] , UpperCamelCase__ , buff=0.5)
        self.play(Write(UpperCamelCase__))
        input.generate_target()
        input.target.next_to(model_arr[0] , direction=UpperCamelCase__ , buff=0.02)
        self.play(MoveToTarget(UpperCamelCase__))
        self.play(FadeOut(UpperCamelCase__))
        snake_case__ = Arrow(start=UpperCamelCase__ , end=UpperCamelCase__ , color=UpperCamelCase__ , buff=0.5)
        a.next_to(model_arr[0].get_left() , UpperCamelCase__ , buff=0.2)
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])
        # narration step 2: explain the offload hook
        snake_case__ = MarkupText(
            F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=2_4 , )
        step_a.move_to([2, 2, 0])
        self.play(Write(UpperCamelCase__ , run_time=3))
        snake_case__ = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
        self.play(
            Write(UpperCamelCase__) , Circumscribe(model_arr[0] , color=UpperCamelCase__ , **UpperCamelCase__) , Circumscribe(model_cpu_arr[0] , color=UpperCamelCase__ , **UpperCamelCase__) , Circumscribe(gpu_rect[0] , color=UpperCamelCase__ , **UpperCamelCase__) , )
        self.play(MoveToTarget(model_cpu_arr[0]))
        snake_case__ = a.copy()
        # walk the input across all six layers, swapping weights CPU<->GPU
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCamelCase__ , buff=0.2)
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)
            snake_case__ = AnimationGroup(
                FadeOut(UpperCamelCase__ , run_time=0.5) , MoveToTarget(UpperCamelCase__ , run_time=0.5) , FadeIn(UpperCamelCase__ , run_time=0.5) , lag_ratio=0.2)
            self.play(UpperCamelCase__)
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    snake_case__ = 0.7
                self.play(
                    Circumscribe(model_arr[i] , **UpperCamelCase__) , Circumscribe(cpu_left_col_base[i] , **UpperCamelCase__) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCamelCase__ , **UpperCamelCase__) , Circumscribe(gpu_rect[0] , color=UpperCamelCase__ , **UpperCamelCase__) , Circumscribe(model_arr[i + 1] , color=UpperCamelCase__ , **UpperCamelCase__) , )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]) , MoveToTarget(model_cpu_arr[i + 1]) , )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] , run_time=0.7) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7) , )
            else:
                # last layer: weights return to CPU, input exits the model
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2)
                self.play(
                    Circumscribe(model_arr[-1] , color=UpperCamelCase__ , **UpperCamelCase__) , Circumscribe(cpu_left_col_base[-1] , color=UpperCamelCase__ , **UpperCamelCase__) , Circumscribe(gpu_rect[0] , color=UpperCamelCase__ , **UpperCamelCase__) , )
                self.play(MoveToTarget(model_cpu_arr[i]))
        snake_case__ = a_c
        snake_case__ = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5)
        self.play(
            FadeOut(UpperCamelCase__) , FadeOut(UpperCamelCase__ , run_time=0.5) , )
        # closing narration
        snake_case__ = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=2_4)
        step_a.move_to([2, 2, 0])
        self.play(Write(UpperCamelCase__ , run_time=3) , MoveToTarget(UpperCamelCase__))
        self.wait()
| 654 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowerCAmelCase ( lowercase_ ):
    """Tool that creates a binary segmentation mask of an image according to a text label.

    Wraps a CLIPSeg checkpoint: `encode` builds the processor inputs, `forward`
    runs the model, and `decode` turns the logits into a black/white PIL mask.
    """

    # NOTE(review): the mangled source assigned every class attribute to the same
    # name `_lowercase` (only the last survived) and every method to `__magic_name__`.
    # The canonical PipelineTool attribute/method names are restored below.
    description = (
        """This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."""
        """It takes two arguments named `image` which should be the original image, and `label` which should be a text """
        """describing the elements what should be identified in the segmentation mask. The tool returns the mask."""
    )
    default_checkpoint = """CIDAS/clipseg-rd64-refined"""
    name = """image_segmenter"""
    model_class = CLIPSegForImageSegmentation
    inputs = ["""image""", """text"""]
    outputs = ["""image"""]

    def __init__(self, *args, **kwargs):
        """Fail fast if the vision extra (PIL) is not installed."""
        requires_backends(self, ["""vision"""])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        """Build processor inputs for one image / one label pair."""
        # padding=True: the mangled source accidentally passed the label here.
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="""pt""")

    def forward(self, inputs):
        """Run the segmentation model without tracking gradients."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Binarize the logits and return them as an 8-bit PIL image mask."""
        array = outputs.cpu().detach().numpy()
        # Threshold at 0: negative logits -> background (0), positive -> foreground (1).
        array[array <= 0] = 0
        array[array > 0] = 1
        # np.uint8 — the mangled source referenced the nonexistent `np.uinta`.
        return Image.fromarray((array * 255).astype(np.uint8))
| 654 | 1 |
import contextlib
import os
import sqlite3
import sqlitea

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    """Shared assertions: the fixture has 4 rows / 3 columns with the expected dtypes."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Reading with keep_in_memory=True must not grow the on-disk Arrow cache."""
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            """dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    """features""" , [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Reading honours an explicit `features` schema; None falls back to inference."""
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    # Build a datasets.Features object from the dtype strings (None means "infer").
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    """Yield every row of the `dataset` table in the SQLite file at *sqlite_path*."""
    # Uses the stdlib `sqlite3` module; connection is closed via contextlib.closing.
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("""SELECT * FROM dataset""")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Round-trip: write the dataset back to SQL and compare rows (single process)."""
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    # The mangled source compared a variable to itself; compare source vs. output rows.
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Round-trip with num_proc=2: multiprocessing must not change the written rows."""
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """num_proc=0 is invalid and must raise ValueError before anything is written."""
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    # The mangled source passed an undefined value to pytest.raises; ValueError is
    # the exception SqlDatasetWriter raises for a non-positive num_proc.
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=0).write()
| 654 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Dict=1_8 , UpperCamelCase__ : Any=3_0 , UpperCamelCase__ : List[Any]=4_0_0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=None , UpperCamelCase__ : Optional[int]=True , ):
'''simple docstring'''
snake_case__ = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = num_channels
snake_case__ = image_size
snake_case__ = min_resolution
snake_case__ = max_resolution
snake_case__ = do_resize
snake_case__ = size
snake_case__ = apply_ocr
def __magic_name__ ( self : Optional[Any]):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
_lowercase : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
snake_case__ = LayoutLMvaImageProcessingTester(self)
@property
def __magic_name__ ( self : Tuple):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
snake_case__ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCamelCase__ , """do_resize"""))
self.assertTrue(hasattr(UpperCamelCase__ , """size"""))
self.assertTrue(hasattr(UpperCamelCase__ , """apply_ocr"""))
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8})
snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2})
    def __magic_name__ ( self : List[str]):
        '''Intentionally empty body — presumably overrides (and thereby disables) an
        inherited common test; the original method name was lost in mangling.
        TODO(review): recover the real name from the upstream test suite.'''
        pass
def __magic_name__ ( self : List[str]):
'''simple docstring'''
snake_case__ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image)
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""")
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , UpperCamelCase__)
self.assertIsInstance(encoding.boxes , UpperCamelCase__)
# Test batched
snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
snake_case__ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray)
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __magic_name__ ( self : Dict):
'''simple docstring'''
snake_case__ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor)
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __magic_name__ ( self : Any):
'''simple docstring'''
snake_case__ = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case__ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""")
snake_case__ = Image.open(ds[0]["""file"""]).convert("""RGB""")
snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""")
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
self.assertEqual(len(encoding.words) , len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case__ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", 
"""consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case__ = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], 
[7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 
8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCamelCase__)
self.assertListEqual(encoding.boxes , UpperCamelCase__)
# with apply_OCR = False
snake_case__ = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__)
snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""")
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
| 654 | 1 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# Old pickles reference `data_utils.Vocab` / `data_utils.Corpus` and the module
# paths `data_utils` / `vocabulary`; alias them so `pickle.load` can resolve them.
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """Convert a Transformer-XL TF checkpoint and/or pickled corpus to PyTorch files.

    Either argument may be empty: a non-empty *transfo_xl_dataset_file* converts the
    pre-processed corpus, a non-empty *tf_checkpoint_path* converts the model weights.
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, """rb""") as fp:
            # latin1 keeps Python-2 pickles loadable; load from the file object, not the path.
            corpus = pickle.load(fp, encoding="""latin1""")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
        print(F'''Save vocabulary to {pytorch_vocab_dump_path}''')
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("""vocab""", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + """/""" + CORPUS_NAME
        print(F'''Save dataset to {pytorch_dataset_dump_path}''')
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''')
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(F'''Building PyTorch model from configuration: {config}''')
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(F'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}''')
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(F'''Save configuration file to {os.path.abspath(pytorch_config_dump_path)}''')
        with open(pytorch_config_dump_path, """w""", encoding="""utf-8""") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point: the mangled source assigned the parser/args to `a__` while
    # the rest of the block used `parser`/`args`; restore consistent names.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
    )
    parser.add_argument(
        """--tf_checkpoint_path""",
        default="""""",
        type=str,
        help="""An optional path to a TensorFlow checkpoint path to be converted.""",
    )
    parser.add_argument(
        """--transfo_xl_config_file""",
        default="""""",
        type=str,
        help=(
            """An optional config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--transfo_xl_dataset_file""",
        default="""""",
        type=str,
        help="""An optional dataset file to be converted in a vocabulary.""",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
| 654 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _lowerCAmelCase ( lowercase_ ):
    """Dataset of tokenized sequences used for LM distillation.

    Stores one numpy array of token ids per sequence plus the sequence lengths,
    and cleans the data on construction: splits over-long sequences, drops very
    short ones and sequences dominated by the unknown token.
    """

    def __init__(self, params, data):
        self.params = params  # run configuration: special token ids, max length, mlm flag, ...
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Consistency check: one stored length per sequence, each matching its sequence."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than the model's max input size into valid chunks."""
        max_len = self.params.max_model_input_size
        too_long = self.lengths > max_len
        logger.info(F'''Splitting {sum(too_long)} too long sequences.''')

        def divide_chunks(l, n):
            # Slice `l` into consecutive chunks of at most `n` items.
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
        else:
            cls_id, sep_id = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room for the boundary tokens re-added below.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''')

    def remove_unknown_sequences(self):
        """Drop sequences where 50% or more of the tokens are the unknown token."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["""unk_token"""]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''')

    def print_statistics(self):
        """Log dataset statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(F'''{len(self)} sequences''')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate `(token_ids, length)` pairs into padded tensors.

        Returns `(tk_t, lg_t)` where `tk_t` is a (bs, max_seq_len_) tensor of token
        ids padded with the pad token (MLM) or unk token (CLM), and `lg_t` is the
        (bs) tensor of original lengths.
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["""pad_token"""]
        else:
            pad_idx = self.params.special_tok_ids["""unk_token"""]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 654 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: maps submodule name -> public names it provides.
_import_structure = {
    """configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exposed when torch is installed.
    _import_structure["""modeling_lilt"""] = [
        """LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LiltForQuestionAnswering""",
        """LiltForSequenceClassification""",
        """LiltForTokenClassification""",
        """LiltModel""",
        """LiltPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 654 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map one original YOSO checkpoint key to its HuggingFace state-dict name.

    Applies ordered substring replacements (layer norms before the generic "norm",
    "ff1"/"ff2" before the generic "ff", etc.) and prefixes non-classifier keys
    with "yoso.".  The mangled source discarded every `replace` result, returning
    the key unchanged; each replacement is now re-assigned to `orig_key`.
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("""model.""" , """""" )
    if "norm1" in orig_key:
        orig_key = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
    if "norm2" in orig_key:
        orig_key = orig_key.replace("""norm2""" , """output.LayerNorm""" )
    if "norm" in orig_key:
        orig_key = orig_key.replace("""norm""" , """LayerNorm""" )
    if "transformer" in orig_key:
        # "transformer_<n>" -> "encoder.layer.<n>"
        layer_num = orig_key.split(""".""" )[0].split("""_""" )[-1]
        orig_key = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("""mha.attn""" , """attention.self""" )
    if "mha" in orig_key:
        orig_key = orig_key.replace("""mha""" , """attention""" )
    if "W_q" in orig_key:
        orig_key = orig_key.replace("""W_q""" , """self.query""" )
    if "W_k" in orig_key:
        orig_key = orig_key.replace("""W_k""" , """self.key""" )
    if "W_v" in orig_key:
        orig_key = orig_key.replace("""W_v""" , """self.value""" )
    if "ff1" in orig_key:
        orig_key = orig_key.replace("""ff1""" , """intermediate.dense""" )
    if "ff2" in orig_key:
        orig_key = orig_key.replace("""ff2""" , """output.dense""" )
    if "ff" in orig_key:
        orig_key = orig_key.replace("""ff""" , """output.dense""" )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
    if "mlm" in orig_key:
        orig_key = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
    if "cls" not in orig_key:
        orig_key = """yoso.""" + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rewrite all state-dict keys in place to the HF YOSO layout.

    Drops pooler / sentence-classification weights, mirrors the decoder bias to
    `cls.predictions.bias`, and materializes the `position_ids` buffer (offset by
    2 for the padding index, matching the YOSO embedding convention).
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            # Re-insert under the converted HF key (the mangled source dropped it).
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["""cls.predictions.bias"""] = orig_state_dict["""cls.predictions.decoder.bias"""]
    orig_state_dict["""yoso.embeddings.position_ids"""] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load an original YOSO checkpoint, convert its keys, and save an HF model."""
    orig_state_dict = torch.load(checkpoint_path, map_location="""cpu""")["""model_state_dict"""]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    # Print the load report so missing/unexpected keys are visible to the operator.
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''')
if __name__ == "__main__":
    # CLI entry point: the mangled source assigned the parser/args to `a__` while
    # the rest of the block used `parser`/`args`; restore consistent names.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for YOSO model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 654 | 1 |
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Any = '''WhisperFeatureExtractor'''
_lowercase : List[Any] = '''WhisperTokenizer'''
def __init__( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__)
snake_case__ = self.feature_extractor
snake_case__ = False
def __magic_name__ ( self : Optional[Any] , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Tuple=True):
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=UpperCamelCase__ , language=UpperCamelCase__ , no_timestamps=UpperCamelCase__)
def __call__( self : Any , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*UpperCamelCase__ , **UpperCamelCase__)
snake_case__ = kwargs.pop("""audio""" , UpperCamelCase__)
snake_case__ = kwargs.pop("""sampling_rate""" , UpperCamelCase__)
snake_case__ = kwargs.pop("""text""" , UpperCamelCase__)
if len(UpperCamelCase__) > 0:
snake_case__ = args[0]
snake_case__ = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
if audio is not None:
snake_case__ = self.feature_extractor(UpperCamelCase__ , *UpperCamelCase__ , sampling_rate=UpperCamelCase__ , **UpperCamelCase__)
if text is not None:
snake_case__ = self.tokenizer(UpperCamelCase__ , **UpperCamelCase__)
if text is None:
return inputs
elif audio is None:
return encodings
else:
snake_case__ = encodings["""input_ids"""]
return inputs
def __magic_name__ ( self : str , *UpperCamelCase__ : Any , **UpperCamelCase__ : Any):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__)
def __magic_name__ ( self : Dict , *UpperCamelCase__ : int , **UpperCamelCase__ : Union[str, Any]):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__)
def __magic_name__ ( self : str , UpperCamelCase__ : str , UpperCamelCase__ : Tuple="np"):
'''simple docstring'''
return self.tokenizer.get_prompt_ids(UpperCamelCase__ , return_tensors=UpperCamelCase__)
| 654 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( lowercase_ ):
    """fsspec filesystem exposing one compressed file as a single uncompressed file.

    Subclasses define the fsspec protocol, the compression codec name, and the
    filename extension to strip (see the bz2/gzip/lz4/xz/zstd subclasses below).

    NOTE(review): assignments in this file were mangled to the single name
    ``snake_case__`` (``self.file`` / ``self.compressed_name`` /
    ``self.dir_cache`` are read but never bound here), and all three class
    attributes share the mangled name ``_lowercase`` — compare with the upstream
    ``datasets`` compression filesystems before trusting runtime behaviour.
    """

    _lowercase : Optional[int] = ''''''
    _lowercase : str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    _lowercase : str = None  # compression type in fsspec. ex: "gzip"
    _lowercase : str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__( self : List[Any] , UpperCamelCase__ : str = "" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , **UpperCamelCase__ : List[Any]):
        '''Lazily open the target URL through fsspec with this class's compression codec.'''
        super().__init__(self , **UpperCamelCase__)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        snake_case__ = fsspec.open(
            UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {}), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        # Archive basename (before any "::" chaining) and the uncompressed member
        # name derived by stripping the final extension.
        snake_case__ = os.path.basename(self.file.path.split("""::""")[0])
        snake_case__ = (
            self.compressed_name[: self.compressed_name.rindex(""".""")]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        snake_case__ = None  # directory cache is built lazily on first info/ls

    @classmethod
    def __magic_name__ ( cls : Union[str, Any] , UpperCamelCase__ : List[Any]):
        '''Strip the protocol prefix and any leading "/" (the archive root is "").'''
        return super()._strip_protocol(UpperCamelCase__).lstrip("""/""")

    def __magic_name__ ( self : Dict):
        '''Build the single-entry directory cache from the underlying file's info.'''
        if self.dir_cache is None:
            snake_case__ = {**self.file.fs.info(self.file.path), """name""": self.uncompressed_name}
            snake_case__ = {f["""name"""]: f}

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : str):
        '''Return the whole decompressed content as bytes.'''
        return self.file.open().read()

    def __magic_name__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : Optional[Any] , ):
        '''Open the decompressed stream; only binary read mode ("rb") is supported.'''
        snake_case__ = self._strip_protocol(UpperCamelCase__)
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''')
        return self.file.open()
class _lowerCAmelCase ( lowercase_ ):
    """Compressed-file filesystem for bz2 archives (protocol/codec "bz2", strips ".bz2")."""

    # NOTE(review): the three attributes share the mangled name ``_lowercase``;
    # originally they were protocol / compression / extension.
    _lowercase : Dict = '''bz2'''
    _lowercase : Dict = '''bz2'''
    _lowercase : Optional[int] = '''.bz2'''
class _lowerCAmelCase ( lowercase_ ):
    """Compressed-file filesystem for gzip archives (protocol/codec "gzip", strips ".gz")."""

    # NOTE(review): the three attributes share the mangled name ``_lowercase``;
    # originally they were protocol / compression / extension.
    _lowercase : Dict = '''gzip'''
    _lowercase : List[str] = '''gzip'''
    _lowercase : Any = '''.gz'''
class _lowerCAmelCase ( lowercase_ ):
    """Compressed-file filesystem for lz4 archives (protocol/codec "lz4", strips ".lz4")."""

    # NOTE(review): the three attributes share the mangled name ``_lowercase``;
    # originally they were protocol / compression / extension.
    _lowercase : str = '''lz4'''
    _lowercase : List[Any] = '''lz4'''
    _lowercase : Dict = '''.lz4'''
class _lowerCAmelCase ( lowercase_ ):
    """Compressed-file filesystem for xz archives (protocol/codec "xz", strips ".xz")."""

    # NOTE(review): the three attributes share the mangled name ``_lowercase``;
    # originally they were protocol / compression / extension.
    _lowercase : Optional[int] = '''xz'''
    _lowercase : Union[str, Any] = '''xz'''
    _lowercase : Optional[int] = '''.xz'''
class _lowerCAmelCase ( lowercase_ ):
    """Compressed-file filesystem for zstandard archives (protocol/codec "zstd", strips ".zst").

    NOTE(review): assignments were mangled to ``snake_case__`` (e.g. the saved
    ``__enter__`` and ``file_`` are read but never bound under those names) —
    compare with the upstream implementation.
    """

    _lowercase : Optional[int] = '''zstd'''
    _lowercase : Tuple = '''zstd'''
    _lowercase : Union[str, Any] = '''.zst'''

    def __init__( self : str , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , UpperCamelCase__ : int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__ : int , ):
        '''Open the zstd stream and wrap its __enter__ so fsspec can set a close attribute.'''
        super().__init__(
            fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        # out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        snake_case__ = self.file.__enter__

        class _lowerCAmelCase :
            """Proxy forwarding everything to the wrapped file object (so close is settable)."""

            def __init__( self : Tuple , UpperCamelCase__ : str):
                '''Keep a reference to the underlying file object.'''
                snake_case__ = file_

            def __enter__( self : List[str]):
                '''Enter the underlying file's context but return the proxy itself.'''
                self._file.__enter__()
                return self

            def __exit__( self : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any]):
                '''Forward context exit to the underlying file.'''
                self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__)

            def __iter__( self : Any):
                '''Iterate the underlying file.'''
                return iter(self._file)

            def __magic_name__ ( self : List[str]):
                '''Return the next item from the underlying file.'''
                return next(self._file)

            def __getattr__( self : Any , UpperCamelCase__ : int):
                '''Delegate all other attribute access to the underlying file.'''
                return getattr(self._file , UpperCamelCase__)

        def fixed_enter(*UpperCamelCase__ : int , **UpperCamelCase__ : int):
            # Replacement __enter__ that returns the settable proxy instead of
            # the raw zstd reader.
            return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__))

        snake_case__ = fixed_enter
| 654 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a__ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """Runs the library's doctests over selected source/docs directories.

    NOTE(review): local assignments were mangled to ``snake_case__`` — e.g. the
    ``doctest.testfile`` result is never bound to the ``result`` read on the next
    line, and ``files`` / ``identifier`` / ``only_modules`` are read but unbound
    as written.  The whole class is skipped via ``unittest.skip`` anyway.
    """

    def __magic_name__ ( self : Tuple , UpperCamelCase__ : Path , UpperCamelCase__ : Union[str, None] = None , UpperCamelCase__ : Union[List[str], None] = None , UpperCamelCase__ : Union[str, List[str], None] = None , UpperCamelCase__ : bool = True , ):
        '''Doctest every matching file in a directory, filtered by (n_)identifier and an ignore list.'''
        snake_case__ = [file for file in os.listdir(UpperCamelCase__) if os.path.isfile(os.path.join(UpperCamelCase__ , UpperCamelCase__))]
        if identifier is not None:
            # Keep only files whose name contains the identifier.
            snake_case__ = [file for file in files if identifier in file]
        if n_identifier is not None:
            # Drop files matching any of the negative identifiers.
            if isinstance(UpperCamelCase__ , UpperCamelCase__):
                for n_ in n_identifier:
                    snake_case__ = [file for file in files if n_ not in file]
            else:
                snake_case__ = [file for file in files if n_identifier not in file]
        snake_case__ = ignore_files or []
        ignore_files.append("""__init__.py""")
        snake_case__ = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("""Testing""" , UpperCamelCase__)
            if only_modules:
                # Import the module and run its doctests as a unittest suite.
                snake_case__ = file.split(""".""")[0]
                try:
                    snake_case__ = getattr(UpperCamelCase__ , UpperCamelCase__)
                    snake_case__ = doctest.DocTestSuite(UpperCamelCase__)
                    snake_case__ = unittest.TextTestRunner().run(UpperCamelCase__)
                    self.assertIs(len(result.failures) , 0)
                except AttributeError:
                    logger.info(F'''{module_identifier} is not a module.''')
            else:
                # ".." / Path works via Path.__rtruediv__; run the file as a doctest script.
                snake_case__ = doctest.testfile(str("""..""" / directory / file) , optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed , 0)

    def __magic_name__ ( self : Union[str, Any]):
        '''Doctest modeling files (minus the CTRL variants).'''
        snake_case__ = Path("""src/transformers""")
        snake_case__ = """modeling"""
        snake_case__ = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(UpperCamelCase__ , identifier=UpperCamelCase__ , ignore_files=UpperCamelCase__)

    def __magic_name__ ( self : List[str]):
        '''Doctest tokenization files.'''
        snake_case__ = Path("""src/transformers""")
        snake_case__ = """tokenization"""
        self.analyze_directory(UpperCamelCase__ , identifier=UpperCamelCase__)

    def __magic_name__ ( self : Dict):
        '''Doctest configuration files.'''
        snake_case__ = Path("""src/transformers""")
        snake_case__ = """configuration"""
        self.analyze_directory(UpperCamelCase__ , identifier=UpperCamelCase__)

    def __magic_name__ ( self : List[Any]):
        '''Doctest everything except configuration/modeling/tokenization files.'''
        snake_case__ = Path("""src/transformers""")
        snake_case__ = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(UpperCamelCase__ , n_identifier=UpperCamelCase__)

    def __magic_name__ ( self : Optional[int]):
        '''Doctest the documentation sources as whole files (not importable modules).'''
        snake_case__ = Path("""docs/source""")
        snake_case__ = ["""favicon.ico"""]
        self.analyze_directory(UpperCamelCase__ , ignore_files=UpperCamelCase__ , only_modules=UpperCamelCase__)
| 654 |
def _UpperCAmelCase ( a : int ):
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
a__ = pytest.mark.integration
@require_faiss
class _lowerCAmelCase ( lowercase_ ):
    """Dataset-level index tests: faiss add/search/save/load plus a mocked ES search.

    NOTE(review): assignments (including tuple unpacking) were mangled to
    ``snake_case__``, so names like ``dset`` / ``examples`` are read but never
    bound as written — compare with the upstream datasets test.
    """

    def __magic_name__ ( self : int):
        '''Build a 30-row dummy dataset with a single "filename" column.'''
        snake_case__ = Dataset.from_dict({"""filename""": ["""my_name-train""" + """_""" + str(UpperCamelCase__) for x in np.arange(3_0).tolist()]})
        return dset

    def __magic_name__ ( self : Any):
        '''Map per-row vectors onto the dataset, add a faiss index, and query it.'''
        import faiss

        snake_case__ = self._create_dummy_dataset()
        snake_case__ = dset.map(
            lambda UpperCamelCase__ , UpperCamelCase__: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=UpperCamelCase__ , keep_in_memory=UpperCamelCase__)
        snake_case__ = dset.add_faiss_index("""vecs""" , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
        # Row 29 has the largest vector, so it should be the nearest by inner product.
        snake_case__ , snake_case__ = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa))
        self.assertEqual(examples["""filename"""][0] , """my_name-train_29""")
        dset.drop_index("""vecs""")

    def __magic_name__ ( self : str):
        '''Add a faiss index built from external numpy arrays and query it.'''
        import faiss

        snake_case__ = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name="""vecs""" , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        snake_case__ , snake_case__ = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa))
        self.assertEqual(examples["""filename"""][0] , """my_name-train_29""")

    def __magic_name__ ( self : Dict):
        '''Save a faiss index to disk, reload it under a new name, and re-query.'''
        import faiss

        snake_case__ = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name="""vecs""" , metric_type=faiss.METRIC_INNER_PRODUCT , )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=UpperCamelCase__) as tmp_file:
            dset.save_faiss_index("""vecs""" , tmp_file.name)
            dset.load_faiss_index("""vecs2""" , tmp_file.name)
        os.unlink(tmp_file.name)
        snake_case__ , snake_case__ = dset.get_nearest_examples("""vecs2""" , np.ones(5 , dtype=np.floataa))
        self.assertEqual(examples["""filename"""][0] , """my_name-train_29""")

    def __magic_name__ ( self : Tuple):
        '''Dropping an index makes subsequent nearest-example queries raise.'''
        snake_case__ = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name="""vecs""")
        dset.drop_index("""vecs""")
        self.assertRaises(UpperCamelCase__ , partial(dset.get_nearest_examples , """vecs2""" , np.ones(5 , dtype=np.floataa)))

    def __magic_name__ ( self : Union[str, Any]):
        '''Add an Elasticsearch index against fully mocked ES client calls.'''
        from elasticsearch import Elasticsearch

        snake_case__ = self._create_dummy_dataset()
        with patch("""elasticsearch.Elasticsearch.search""") as mocked_search, patch(
            """elasticsearch.client.IndicesClient.create""") as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""") as mocked_bulk:
            snake_case__ = {"""acknowledged""": True}
            mocked_bulk.return_value([(True, None)] * 3_0)
            snake_case__ = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 2_9}]}}
            snake_case__ = Elasticsearch()
            dset.add_elasticsearch_index("""filename""" , es_client=UpperCamelCase__)
            snake_case__ , snake_case__ = dset.get_nearest_examples("""filename""" , """my_name-train_29""")
            self.assertEqual(examples["""filename"""][0] , """my_name-train_29""")
@require_faiss
class _lowerCAmelCase ( lowercase_ ):
    """Unit tests for the low-level FaissIndex wrapper (add/search/factory/save/load).

    NOTE(review): tuple-unpacking assignments were mangled to
    ``snake_case__ , snake_case__ = ...`` so names like ``index`` / ``scores`` /
    ``indices`` are read but never bound as written.
    """

    def __magic_name__ ( self : List[Any]):
        '''Add vectors incrementally and check single and batched inner-product search.'''
        import faiss

        snake_case__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.floataa))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal , 5)
        index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
        self.assertEqual(index.faiss_index.ntotal , 1_0)
        # single query
        snake_case__ = np.zeros(5 , dtype=np.floataa)
        snake_case__ = 1
        snake_case__ , snake_case__ = index.search(UpperCamelCase__)
        self.assertRaises(UpperCamelCase__ , index.search , query.reshape(-1 , 1))
        self.assertGreater(scores[0] , 0)
        self.assertEqual(indices[0] , 1)
        # batched queries
        snake_case__ = np.eye(5 , dtype=np.floataa)[::-1]
        snake_case__ , snake_case__ = index.search_batch(UpperCamelCase__)
        self.assertRaises(UpperCamelCase__ , index.search_batch , queries[0])
        snake_case__ = [scores[0] for scores in total_scores]
        snake_case__ = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(UpperCamelCase__) , 0)
        self.assertListEqual([4, 3, 2, 1, 0] , UpperCamelCase__)

    def __magic_name__ ( self : List[str]):
        '''Build indexes from string factories and reject factory+custom combinations.'''
        import faiss

        snake_case__ = FaissIndex(string_factory="""Flat""")
        index.add_vectors(np.eye(5 , dtype=np.floataa))
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
        snake_case__ = FaissIndex(string_factory="""LSH""")
        index.add_vectors(np.eye(5 , dtype=np.floataa))
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
        with self.assertRaises(UpperCamelCase__):
            snake_case__ = FaissIndex(string_factory="""Flat""" , custom_index=faiss.IndexFlat(5))

    def __magic_name__ ( self : Optional[Any]):
        '''Wrap a pre-built faiss index object directly.'''
        import faiss

        snake_case__ = faiss.IndexFlat(5)
        snake_case__ = FaissIndex(custom_index=UpperCamelCase__)
        index.add_vectors(np.eye(5 , dtype=np.floataa))
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat)

    def __magic_name__ ( self : Tuple):
        '''Round-trip an index through save()/load() on a temp file and re-query.'''
        import faiss

        snake_case__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5 , dtype=np.floataa))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=UpperCamelCase__) as tmp_file:
            index.save(tmp_file.name)
            snake_case__ = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        snake_case__ = np.zeros(5 , dtype=np.floataa)
        snake_case__ = 1
        snake_case__ , snake_case__ = index.search(UpperCamelCase__)
        self.assertGreater(scores[0] , 0)
        self.assertEqual(indices[0] , 1)
@require_faiss
def _UpperCAmelCase ( a : int ):
    # Round-trip a FaissIndex through an fsspec filesystem via storage_options.
    # NOTE(review): the parameter is annotated ``int`` but is used below as a
    # pytest ``mockfs`` fixture exposing ``storage_options`` (the annotation is
    # wrong), and assignments were mangled to ``snake_case__`` so ``index`` /
    # ``index_name`` / ``mockfs`` / ``scores`` / ``indices`` are read unbound.
    import faiss

    snake_case__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.floataa ) )
    snake_case__ = """index.faiss"""
    snake_case__ = F'''mock://{index_name}'''
    index.save(a , storage_options=mockfs.storage_options )
    snake_case__ = FaissIndex.load(a , storage_options=mockfs.storage_options )
    snake_case__ = np.zeros(5 , dtype=np.floataa )
    snake_case__ = 1
    snake_case__ , snake_case__ = index.search(a )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class _lowerCAmelCase ( lowercase_ ):
    """Tests ElasticSearchIndex search/search_batch against a fully mocked ES client.

    NOTE(review): assignments were mangled to ``snake_case__`` — names like
    ``index`` / ``scores`` / ``indices`` are read but unbound as written.
    """

    def __magic_name__ ( self : Dict):
        '''Mock ES calls, add documents, then check single and batched queries (with timeouts).'''
        from elasticsearch import Elasticsearch

        with patch("""elasticsearch.Elasticsearch.search""") as mocked_search, patch(
            """elasticsearch.client.IndicesClient.create""") as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""") as mocked_bulk:
            snake_case__ = Elasticsearch()
            snake_case__ = {"""acknowledged""": True}
            snake_case__ = ElasticSearchIndex(es_client=UpperCamelCase__)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["""foo""", """bar""", """foobar"""])
            # single query
            snake_case__ = """foo"""
            snake_case__ = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
            snake_case__ , snake_case__ = index.search(UpperCamelCase__)
            self.assertEqual(scores[0] , 1)
            self.assertEqual(indices[0] , 0)
            # single query with timeout
            snake_case__ = """foo"""
            snake_case__ = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
            snake_case__ , snake_case__ = index.search(UpperCamelCase__ , request_timeout=3_0)
            self.assertEqual(scores[0] , 1)
            self.assertEqual(indices[0] , 0)
            # batched queries
            snake_case__ = ["""foo""", """bar""", """foobar"""]
            snake_case__ = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
            snake_case__ , snake_case__ = index.search_batch(UpperCamelCase__)
            snake_case__ = [scores[0] for scores in total_scores]
            snake_case__ = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(UpperCamelCase__) , 0)
            self.assertListEqual([1, 1, 1] , UpperCamelCase__)
            # batched queries with timeout
            snake_case__ = ["""foo""", """bar""", """foobar"""]
            snake_case__ = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
            snake_case__ , snake_case__ = index.search_batch(UpperCamelCase__ , request_timeout=3_0)
            snake_case__ = [scores[0] for scores in total_scores]
            snake_case__ = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(UpperCamelCase__) , 0)
            self.assertListEqual([1, 1, 1] , UpperCamelCase__)
| 654 |
class _lowerCAmelCase :
    """Binary-indexed-tree variant supporting point update and range *max* query.

    NOTE(review): every method in this class is named ``__magic_name__``
    (mangled), so later definitions shadow earlier ones, and assignments to
    ``snake_case__`` never populate the ``self.size`` / ``self.tree`` /
    ``self.arr`` fields the methods read.  The originals were presumably
    get_next / get_prev / update / query — restore before use.
    """

    def __init__( self : List[Any] , UpperCamelCase__ : int):
        '''Allocate the value array and the max-tree for ``size`` elements.'''
        snake_case__ = size
        snake_case__ = [0] * size
        snake_case__ = [0] * size

    @staticmethod
    def __magic_name__ ( UpperCamelCase__ : int):
        '''Next index whose tree interval covers this one (sets the lowest unset bit).'''
        return index | (index + 1)

    @staticmethod
    def __magic_name__ ( UpperCamelCase__ : int):
        '''One before the left border of the interval covered by ``index``.'''
        return (index & (index + 1)) - 1

    def __magic_name__ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int):
        '''Point-assign ``value`` at ``index`` and propagate maxima up the tree.'''
        snake_case__ = value
        while index < self.size:
            snake_case__ = self.get_prev(UpperCamelCase__) + 1
            if current_left_border == index:
                # Interval starts exactly here: node max is the new value.
                snake_case__ = value
            else:
                snake_case__ = max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = self.get_next(UpperCamelCase__)

    def __magic_name__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int):
        '''Return max(arr[left:right]) by hopping over fully covered intervals.'''
        right -= 1 # Because of right is exclusive
        snake_case__ = 0
        while left <= right:
            snake_case__ = self.get_prev(UpperCamelCase__)
            if left <= current_left:
                # Whole node interval lies inside the query: take its max, jump left.
                snake_case__ = max(UpperCamelCase__ , self.tree[right])
                snake_case__ = current_left
            else:
                snake_case__ = max(UpperCamelCase__ , self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 654 | 1 |
def _UpperCAmelCase ( a : str , a : str ):
snake_case__ = len(a )
snake_case__ = []
for i in range(len(a ) - pat_len + 1 ):
snake_case__ = True
for j in range(a ):
if s[i + j] != pattern[j]:
snake_case__ = False
break
if match_found:
position.append(a )
return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
| 654 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _lowerCAmelCase :
    """Helper that builds tiny Pegasus configs/inputs and checks decoder caching.

    NOTE(review): assignments were mangled to ``snake_case__`` — none of the
    ``self.*`` hyperparameters read by the other methods are actually bound in
    ``__init__`` as written; compare with the upstream transformers test.
    """

    _lowercase : List[str] = PegasusConfig
    _lowercase : Union[str, Any] = {}
    _lowercase : Tuple = '''gelu'''

    def __init__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int]=1_3 , UpperCamelCase__ : Any=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : int=9_9 , UpperCamelCase__ : Dict=3_2 , UpperCamelCase__ : str=2 , UpperCamelCase__ : int=4 , UpperCamelCase__ : Tuple=3_7 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : str=4_0 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Dict=0 , ):
        '''Record the tiny-model hyperparameters used to build configs and inputs.'''
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = seq_length
        snake_case__ = is_training
        snake_case__ = use_labels
        snake_case__ = vocab_size
        snake_case__ = hidden_size
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = intermediate_size
        snake_case__ = hidden_dropout_prob
        snake_case__ = attention_probs_dropout_prob
        snake_case__ = max_position_embeddings
        snake_case__ = eos_token_id
        snake_case__ = pad_token_id
        snake_case__ = bos_token_id

    def __magic_name__ ( self : Optional[Any]):
        '''Build a tiny PegasusConfig plus a matching random input dict.'''
        snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
        # Force every sequence to end with the EOS token.
        snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
        snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1)
        snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        snake_case__ = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        snake_case__ = prepare_pegasus_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        return config, inputs_dict

    def __magic_name__ ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]):
        '''Check that decoding with past_key_values matches full-sequence decoding.'''
        snake_case__ = TFPegasusModel(config=UpperCamelCase__).get_decoder()
        snake_case__ = inputs_dict["""input_ids"""]
        snake_case__ = input_ids[:1, :]
        snake_case__ = inputs_dict["""attention_mask"""][:1, :]
        snake_case__ = inputs_dict["""head_mask"""]
        snake_case__ = 1
        # first forward pass
        snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , use_cache=UpperCamelCase__)
        snake_case__ , snake_case__ = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size)
        snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
        # append to next input_ids and
        snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1)
        snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1)
        snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__)[0]
        snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__)[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
        # select random slice
        snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1]))
        snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
        snake_case__ = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3)
def _UpperCAmelCase ( a : str , a : Union[str, Any] , a : List[str] , a : str=None , a : int=None , a : int=None , a : int=None , a : Optional[int]=None , ):
    # Build the full dict of model inputs, filling in default attention/head masks.
    # NOTE(review): the parameter name ``a`` is repeated (a SyntaxError) and the
    # body reads ``config`` / ``input_ids`` / ``decoder_input_ids`` / the various
    # masks that the mangled signature never binds — presumed original params:
    # (config, input_ids, decoder_input_ids, attention_mask,
    #  decoder_attention_mask, head_mask, decoder_head_mask, cross_attn_head_mask).
    if attention_mask is None:
        # Default: attend to everything that is not the pad token.
        snake_case__ = tf.cast(tf.math.not_equal(a , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # First decoder position is always attended; rest masks pad tokens.
        snake_case__ = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """Model-level tests for TF Pegasus: common config checks and decoder caching.

    NOTE(review): setUp's assignments were mangled to ``snake_case__``, so the
    ``self.model_tester`` / ``self.config_tester`` read by the other tests are
    never bound as written; ``TFPegasusModelTester`` also does not match the
    mangled name of the helper class above.
    """

    _lowercase : int = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    _lowercase : List[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    _lowercase : List[Any] = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _lowercase : Optional[int] = True
    _lowercase : Dict = False
    _lowercase : Any = False

    def __magic_name__ ( self : str):
        '''Create the model tester and the config tester.'''
        snake_case__ = TFPegasusModelTester(self)
        snake_case__ = ConfigTester(self , config_class=UpperCamelCase__)

    def __magic_name__ ( self : List[Any]):
        '''Run the shared config sanity tests.'''
        self.config_tester.run_common_tests()

    def __magic_name__ ( self : Optional[int]):
        '''Exercise decoder past-key-values equivalence on generated inputs.'''
        snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__)
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test: google/pegasus-xsum summarization of two articles.

    NOTE(review): generation assignments were mangled to ``snake_case__`` —
    ``generated_words`` / ``model_inputs`` / ``generated_ids`` are read but never
    bound as written; the class-attribute lists also all share the mangled name
    ``_lowercase`` (originally src_text / expected summaries / model name).
    """

    _lowercase : List[str] = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    _lowercase : str = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ] # differs slightly from pytorch, likely due to numerical differences in linear layers
    _lowercase : int = '''google/pegasus-xsum'''

    @cached_property
    def __magic_name__ ( self : Dict):
        '''Lazily load and cache the tokenizer for the target checkpoint.'''
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def __magic_name__ ( self : int):
        '''Lazily load and cache the TF seq2seq model for the target checkpoint.'''
        snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def __magic_name__ ( self : Dict , **UpperCamelCase__ : List[Any]):
        '''Generate summaries and assert they match the expected texts.'''
        snake_case__ = self.translate_src_text(**UpperCamelCase__)
        assert self.expected_text == generated_words

    def __magic_name__ ( self : str , **UpperCamelCase__ : List[Any]):
        '''Tokenize the source texts, generate with beam search, and decode.'''
        snake_case__ = self.tokenizer(self.src_text , **UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""tf""")
        snake_case__ = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase__ , )
        snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase__)
        return generated_words

    @slow
    def __magic_name__ ( self : List[str]):
        '''Full batch comparison against the expected XSUM summaries.'''
        self._assert_generated_batch_equal_expected()
| 654 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
# Module-level RNG shared by input builders when no explicit rng is supplied.
a__ = random.Random()


def floats_list ( shape , scale=1.0 , rng=None , name=None ):
    """Create a ``shape[0] x shape[1]`` nested list of random floats in [0, scale).

    ``name`` is accepted for signature compatibility with callers and unused.
    NOTE(review): the original signature repeated one parameter name (a
    SyntaxError) and read an undefined ``global_rng``; names restored —
    ``floats_list`` matches the call sites in the tester/test classes below.
    """
    if rng is None:
        rng = a__  # fall back to the shared module-level RNG
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester ( unittest.TestCase ):
    """Holds the hyper-parameters used to build inputs for TvltFeatureExtractor tests.

    NOTE(review): class name restored to match the ``TvltFeatureExtractionTester``
    reference in the test class below; the original ``__init__`` repeated a
    placeholder parameter name (a SyntaxError) and dropped the ``self.``
    prefix on every attribute assignment.
    """

    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , spectrogram_length=2048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=44100 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive input lengths so the batch spans
        # [min_seq_length, max_seq_length).
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict( self ):
        """Config kwargs for constructing the feature extractor under test."""
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        """Build a batch of float speech inputs (optionally equal-length / numpy)."""
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _lowerCAmelCase ( lowercase_ , unittest.TestCase ):
    """Unit tests for ``TvltFeatureExtractor``: properties, (de)serialization
    round-trips, batching/masking behavior and a slow integration check.

    NOTE(review): locals and the ``feature_extraction_class`` attribute were
    obfuscated to placeholder names (NameError at runtime); restored from the
    surviving read sites. Method names restored so unittest discovers them.
    """

    feature_extraction_class = TvltFeatureExtractor

    def setUp( self ):
        # Attach the config helper used by the saving-test mixin.
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties( self ):
        """The extractor must expose all of its configuration attributes."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor , """spectrogram_length"""))
        self.assertTrue(hasattr(feature_extractor , """feature_size"""))
        self.assertTrue(hasattr(feature_extractor , """num_audio_channels"""))
        self.assertTrue(hasattr(feature_extractor , """hop_length"""))
        self.assertTrue(hasattr(feature_extractor , """chunk_length"""))
        self.assertTrue(hasattr(feature_extractor , """sampling_rate"""))

    def test_feat_extract_from_and_save_pretrained( self ):
        """save_pretrained/from_pretrained must round-trip the configuration."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        # mel_filters are float arrays: compare with allclose, the rest exactly.
        mel_first = dict_first.pop("""mel_filters""")
        mel_second = dict_second.pop("""mel_filters""")
        self.assertTrue(np.allclose(mel_first , mel_second))
        self.assertEqual(dict_first , dict_second)

    def test_feat_extract_to_json_file( self ):
        """to_json_file/from_json_file must round-trip the configuration."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """feat_extract.json""")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("""mel_filters""")
        mel_second = dict_second.pop("""mel_filters""")
        self.assertTrue(np.allclose(mel_first , mel_second))
        self.assertEqual(dict_first , dict_second)

    def test_call( self ):
        """Unbatched, batched and masked calls all produce (B, C, T, F) arrays."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="""np""" , sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors="""np""" , sampling_rate=44100 , mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="""np""" , sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples( self , num_samples ):
        """Fetch ``num_samples`` decoded audio arrays from the dummy LibriSpeech set."""
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""")
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""").select(range(num_samples))[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]

    def test_integration( self ):
        """Slow integration test against a stored reference slice."""
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors="""pt""").audio_values
        # assertEqual (assertEquals is deprecated).
        self.assertEqual(audio_values.shape , (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1E-4))
| 654 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
# Module-level logger.
a__ = logging.get_logger(__name__)
# File names that make up the tokenizer vocabulary on disk.
# NOTE(review): each of the following statements rebinds the same placeholder
# name ``a__``; the tokenizer class below reads these tables as
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP / PRETRAINED_LYRIC_TOKENS_SIZES
# — confirm the intended bindings.
a__ = {
    """artists_file""": """artists.json""",
    """lyrics_file""": """lyrics.json""",
    """genres_file""": """genres.json""",
}
# Per-vocab-file download URL map for the ``jukebox`` checkpoint.
a__ = {
    """artists_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
    },
    """genres_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
    },
    """lyrics_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
    },
}
# Maximum number of lyric tokens per checkpoint.
a__ = {
    """jukebox""": 5_1_2,
}
class _lowerCAmelCase ( lowercase_ ):
    """Tokenizer mapping (artist, genre, lyrics) triples to token-id tensors.

    NOTE(review): the class-level constants referenced below
    (``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP``,
    ``PRETRAINED_LYRIC_TOKENS_SIZES``) exist in this module only under the
    placeholder name ``a__`` — confirm the intended bindings. Many locals in
    the method bodies were likewise obfuscated to ``snake_case__`` while the
    later read sites keep their real names; the original names need restoring
    before this class can run.
    """
    _lowercase : str = VOCAB_FILES_NAMES
    _lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : str = PRETRAINED_LYRIC_TOKENS_SIZES
    _lowercase : Any = ['''input_ids''', '''attention_mask''']
    def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=["v3", "v2", "v2"] , UpperCamelCase__ : List[str]=5_1_2 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : List[Any]="<|endoftext|>" , **UpperCamelCase__ : List[Any] , ):
        """Load the artist/genre/lyrics vocab JSON files and build forward and
        reverse lookup tables.

        NOTE(review): the repeated placeholder parameter names make this
        signature invalid as written; the body also reads names that are never
        bound (``unk_token``, ``version``, ``max_n_lyric_tokens``, ``n_genres``,
        ``oov``) — to be restored from upstream.
        """
        snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else unk_token
        super().__init__(
            unk_token=UpperCamelCase__ , n_genres=UpperCamelCase__ , version=UpperCamelCase__ , max_n_lyric_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
        snake_case__ = version
        snake_case__ = max_n_lyric_tokens
        snake_case__ = n_genres
        # Three separate vocab files: artists, genres, lyric characters.
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        # Characters NOT in the lyric vocabulary (to be stripped from lyrics).
        snake_case__ = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 7_9:
            snake_case__ = oov.replace(R"""\-'""" , R"""\-+'""")
        snake_case__ = regex.compile(UpperCamelCase__)
        # Reverse (id -> token) maps for decoding.
        snake_case__ = {v: k for k, v in self.artists_encoder.items()}
        snake_case__ = {v: k for k, v in self.genres_encoder.items()}
        snake_case__ = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def __magic_name__ ( self : List[str]):
        """Total vocabulary size across the three sub-vocabularies."""
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
    def __magic_name__ ( self : Union[str, Any]):
        """Return the combined vocabulary.

        NOTE(review): ``dict(a, b, c)`` with three positional dicts raises
        TypeError — presumably meant to merge the three encoder dicts.
        """
        return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder)
    def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int):
        """Convert artist/genre/lyric tokens to ids; pad genres to ``n_genres``
        with -1. Unknown tokens map to id 0."""
        snake_case__ = [self.artists_encoder.get(UpperCamelCase__ , 0) for artist in list_artists]
        for genres in range(len(UpperCamelCase__)):
            snake_case__ = [self.genres_encoder.get(UpperCamelCase__ , 0) for genre in list_genres[genres]]
            snake_case__ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        snake_case__ = [[self.lyrics_encoder.get(UpperCamelCase__ , 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Optional[int]):
        """Tokenize lyrics at character granularity (a string becomes a list
        of single characters)."""
        return list(UpperCamelCase__)
    def __magic_name__ ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , **UpperCamelCase__ : List[str]):
        """Normalize artist/genre/lyrics then character-tokenize the lyrics."""
        snake_case__ , snake_case__ , snake_case__ = self.prepare_for_tokenization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = self._tokenize(UpperCamelCase__)
        return artist, genre, lyrics
    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool = False):
        """Per checkpoint version, lowercase (v3) or normalize + tag ``.v2``
        (v2) the artists/genres, and strip out-of-vocabulary lyric characters."""
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                snake_case__ = artists[idx].lower()
                snake_case__ = [genres[idx].lower()]
            else:
                snake_case__ = self._normalize(artists[idx]) + """.v2"""
                snake_case__ = [
                    self._normalize(UpperCamelCase__) + """.v2""" for genre in genres[idx].split("""_""")
                ] # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            # v2 uses an explicit fixed character vocabulary.
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""")
            snake_case__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
            snake_case__ = {vocab[index]: index + 1 for index in range(len(UpperCamelCase__))}
            snake_case__ = 0
            snake_case__ = len(UpperCamelCase__) + 1
            snake_case__ = self.vocab
            snake_case__ = {v: k for k, v in self.vocab.items()}
            snake_case__ = """"""
        else:
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""")
        snake_case__ = self._run_strip_accents(UpperCamelCase__)
        snake_case__ = lyrics.replace("""\\""" , """\n""")
        snake_case__ = self.out_of_vocab.sub("""""" , UpperCamelCase__), [], []
        return artists, genres, lyrics
    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : str):
        """Strip accents by NFD-decomposing and dropping combining marks (Mn)."""
        snake_case__ = unicodedata.normalize("""NFD""" , UpperCamelCase__)
        snake_case__ = []
        for char in text:
            snake_case__ = unicodedata.category(UpperCamelCase__)
            if cat == "Mn":
                continue
            output.append(UpperCamelCase__)
        return "".join(UpperCamelCase__)
    def __magic_name__ ( self : List[str] , UpperCamelCase__ : str):
        """Normalize a v2 artist/genre name: lowercase, keep only
        [a-z0-9.], collapse everything else into single underscores."""
        snake_case__ = (
            [chr(UpperCamelCase__) for i in range(ord("""a""") , ord("""z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""A""") , ord("""Z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""0""") , ord("""9""") + 1)]
            + ["""."""]
        )
        snake_case__ = frozenset(UpperCamelCase__)
        snake_case__ = re.compile(R"""_+""")
        snake_case__ = """""".join([c if c in accepted else """_""" for c in text.lower()])
        snake_case__ = pattern.sub("""_""" , UpperCamelCase__).strip("""_""")
        return text
    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : List[str]):
        """Join lyric tokens back into a space-separated string."""
        return " ".join(UpperCamelCase__)
    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : bool = False):
        """Convert ``inputs`` to the requested tensor framework (TF / PyTorch /
        JAX / NumPy), optionally prepending a batch axis."""
        if not isinstance(UpperCamelCase__ , UpperCamelCase__):
            snake_case__ = TensorType(UpperCamelCase__)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""")
            import tensorflow as tf
            snake_case__ = tf.constant
            snake_case__ = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""")
            import torch
            snake_case__ = torch.tensor
            snake_case__ = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""")
            import jax.numpy as jnp # noqa: F811
            snake_case__ = jnp.array
            snake_case__ = _is_jax
        else:
            snake_case__ = np.asarray
            snake_case__ = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                snake_case__ = [inputs]
            if not is_tensor(UpperCamelCase__):
                snake_case__ = as_tensor(UpperCamelCase__)
        except: # noqa E722
            raise ValueError(
                """Unable to create tensor, you should probably activate truncation and/or padding """
                """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""")
        return inputs
    def __call__( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any="" , UpperCamelCase__ : Dict="pt"):
        """Tokenize one (artist, genres, lyrics) triple into a BatchEncoding
        with one id tensor per checkpoint version."""
        snake_case__ = [0, 0, 0]
        snake_case__ = [artist] * len(self.version)
        snake_case__ = [genres] * len(self.version)
        snake_case__ , snake_case__ , snake_case__ = self.tokenize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ , snake_case__ , snake_case__ = self._convert_token_to_id(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        # Attention mask of -inf matches the model's additive-mask convention.
        snake_case__ = [-INFINITY] * len(full_tokens[-1])
        snake_case__ = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCamelCase__)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks})
    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None):
        """Write the three vocab JSON files into ``save_directory`` and return
        their paths."""
        if not os.path.isdir(UpperCamelCase__):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCamelCase__))
        return (artists_file, genres_file, lyrics_file)
    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]):
        """Decode (artist id, genre ids, lyric ids) back into tokens via the
        reverse lookup tables."""
        snake_case__ = self.artists_decoder.get(UpperCamelCase__)
        snake_case__ = [self.genres_decoder.get(UpperCamelCase__) for genre in genres_index]
        snake_case__ = [self.lyrics_decoder.get(UpperCamelCase__) for character in lyric_index]
        return artist, genres, lyrics
| 654 | 1 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
    """Integration tests comparing FlaxUNet2DConditionModel outputs against
    stored PyTorch reference slices.

    NOTE(review): method and parameter names restored from the surviving
    keyword call sites (``fpaa=``, ``shape=``, ``model_id=``); the original
    signatures repeated placeholder names (a SyntaxError) and referenced
    nonexistent dtypes (``jnp.bfloataa`` etc.).
    """

    def get_file_format( self , seed , shape ):
        """Filename of the stored gaussian-noise fixture for (seed, shape)."""
        # Was ``str(UpperCamelCase__)`` — the loop variable ``s`` is intended.
        return F'''gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy'''

    def tearDown( self ):
        """Free accelerator/host memory between tests."""
        super().tearDown()
        gc.collect()

    def get_latents( self , seed=0 , shape=(4, 4, 64, 64) , fpaa=False ):
        """Load a stored latent fixture, in bfloat16 when ``fpaa`` is set."""
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape)) , dtype=dtype)
        return image

    def get_unet_model( self , fpaa=False , model_id="CompVis/stable-diffusion-v1-4" ):
        """Load the Flax UNet (the ``bf16`` revision when ``fpaa`` is set)."""
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        revision = """bf16""" if fpaa else None
        model , params = FlaxUNetaDConditionModel.from_pretrained(
            model_id , subfolder="""unet""" , dtype=dtype , revision=revision)
        return model, params

    def get_encoder_hidden_states( self , seed=0 , shape=(4, 77, 768) , fpaa=False ):
        """Load a stored text-encoder hidden-state fixture."""
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape)) , dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [8_3, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
            [1_7, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
            [8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
            [3, 1_0_0_0, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
            # fmt: on
        ])
    def test_compvis_sd_v1_4_flax_vs_torch_fp16( self , seed , timestep , expected_slice ):
        """SD v1.4: the sampled output slice must match the stored torch slice."""
        model , params = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=True)
        latents = self.get_latents(seed , fpaa=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed , fpaa=True)
        sample = model.apply(
            {"""params""": params} , latents , jnp.array(timestep , dtype=jnp.int32) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1E-2)

    @parameterized.expand(
        [
            # fmt: off
            [8_3, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
            [1_7, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
            [8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
            [3, 1_0_0_0, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
            # fmt: on
        ])
    def test_stabilityai_sd_v2_flax_vs_torch_fp16( self , seed , timestep , expected_slice ):
        """SD v2: the sampled output slice must match the stored torch slice."""
        model , params = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=True)
        latents = self.get_latents(seed , shape=(4, 4, 96, 96) , fpaa=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 77, 1024) , fpaa=True)
        sample = model.apply(
            {"""params""": params} , latents , jnp.array(timestep , dtype=jnp.int32) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1E-2)
| 654 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge :
    """Resize each image so its shorter edge matches a sampled target length,
    capping the longer edge at ``max_size``.

    NOTE(review): class name restored to match the ``ResizeShortestEdge``
    reference in the Preprocess class below; the original ``__init__``
    repeated a placeholder parameter name (a SyntaxError) and dropped the
    ``self.`` prefix on attribute assignments.
    """

    def __init__( self , short_edge_length , max_size=sys.maxsize ):
        """``short_edge_length`` is a (lo, hi) range to sample the target
        shorter-edge size from; ``max_size`` caps the longer edge."""
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__( self , imgs ):
        """Resize a list of HWC images (uint8 numpy or float torch tensors);
        torch tensors come back as CHW."""
        img_augs = []
        for img in imgs:
            h , w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h , w)
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            if max(newh , neww) > self.max_size:
                # Longer edge exceeds the cap: shrink both dims proportionally.
                scale = self.max_size * 1.0 / max(newh , neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                # uint8 numpy image: resize through PIL.
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2 , 0 , 1).unsqueeze(0) # hw(c) -> nchw
                # Was ``align_corners=<the input list>`` (merely truthy);
                # False matches the intended bilinear semantics.
                img = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
class _lowerCAmelCase :
    """Image preprocessing pipeline for the FRCNN demo: resize the shortest
    edge, normalize with the config pixel statistics, and pad to a common size.

    NOTE(review): the original ``__init__`` dropped the ``self.`` prefix on
    every attribute assignment, so the attributes read by ``__call__``
    (``self.aug``, ``self.normalizer`` …) were never set; restored here. The
    ``pad`` method name is restored to match the ``self.pad(...)`` call site.
    """

    def __init__( self , cfg ):
        """Read all knobs from a detectron2-style ``cfg`` object."""
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad( self , images ):
        """Zero-pad all images (right/bottom) to a shared max size.

        Returns (stacked batch tensor, tensor of original (h, w) sizes)."""
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__( self , images , single_image=False ):
        """Run the full pipeline; returns (images, sizes, scales_yx) —
        unbatched when ``single_image`` is set."""
        with torch.no_grad():
            if not isinstance(images , list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i] , torch.Tensor):
                    images.insert(i , images.pop(i).to(self.device).float())
                elif not isinstance(images[i] , torch.Tensor):
                    # Paths / arrays go through img_tensorize first.
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i) , input_format=self.input_format))
                        .to(self.device)
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images , sizes = self.pad(images)
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # per-image (y, x) rescale factors from original to padded sizes
            scales_yx = torch.true_divide(raw_sizes , sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _UpperCAmelCase ( a : Optional[Any] , a : Any ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _UpperCAmelCase ( a : Any , a : Tuple[int, int] ):
assert torch.isfinite(a ).all(), "Box tensor contains infinite or NaN!"
snake_case__ , snake_case__ = box_size
tensor[:, 0].clamp_(min=0 , max=a )
tensor[:, 1].clamp_(min=0 , max=a )
tensor[:, 2].clamp_(min=0 , max=a )
tensor[:, 3].clamp_(min=0 , max=a )
| 654 | 1 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime ( arrival_time: list[int] , burst_time: list[int] , no_of_processes: int ) -> list[int]:
    """Compute per-process waiting times under non-preemptive shortest-job-first.

    NOTE(review): the original signature repeated the parameter name ``a``
    (a SyntaxError) and every local was obfuscated; names restored from the
    read sites. Function name restored to match the ``__main__`` call below.
    """
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            # Pick the ready process with the smallest remaining burst.
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            # Nothing has arrived yet: idle one time unit.
            total_time += 1
    return waiting_time
def calculate_turnaroundtime ( burst_time: list[int] , no_of_processes: int , waiting_time: list[int] ) -> list[int]:
    """Turnaround time per process = burst time + waiting time.

    NOTE(review): parameter names restored (the original repeated ``a``);
    function name restored to match the ``__main__`` call below.
    """
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
# Demo / smoke test: run non-preemptive SJF on one example and print a table.
if __name__ == "__main__":
    print("""[TEST CASE 01]""")
    # NOTE(review): the four bindings below all use the placeholder name
    # ``a__`` while the later lines read ``no_of_processes`` / ``burst_time``
    # / ``arrival_time`` / ``waiting_time`` / ``turn_around_time``; the
    # intended variable names must be restored for this demo to run.
    a__ = 4
    a__ = [2, 5, 3, 7]
    a__ = [0, 0, 0, 0]
    a__ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    a__ = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
    # Printing the Result
    print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
            F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
        )
    print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
    print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 654 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
a__ = logging.get_logger(__name__)
# Map of pretrained-config archive URLs.
# NOTE(review): rebinds the placeholder name ``a__`` used for the logger above;
# the intended distinct names need restoring.
a__ = {
    """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCAmelCase ( lowercase_ ):
    """
    Configuration class for a WavLM speech model (``model_type = "wavlm"``).

    Stores transformer-encoder sizes, convolutional feature-extractor layout,
    relative-position bucket settings, SpecAugment masking options,
    codevector-quantization pretraining options, CTC-loss options, adapter
    options and x-vector head options, and validates that the three
    convolutional layout lists have matching lengths.

    NOTE(review): an automated rename collapsed every ``__init__`` parameter
    onto a single placeholder name (syntactically invalid) while the body
    still reads the original descriptive names — repair the signature against
    upstream ``transformers.WavLMConfig`` before use.
    """
    # Model-type identifier used by the auto-config machinery.
    _lowercase : Dict = '''wavlm'''
    def __init__( self : Tuple , UpperCamelCase__ : str=3_2 , UpperCamelCase__ : Any=7_6_8 , UpperCamelCase__ : Any=1_2 , UpperCamelCase__ : Tuple=1_2 , UpperCamelCase__ : str=3_0_7_2 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Optional[int]=1E-5 , UpperCamelCase__ : Any="group" , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCamelCase__ : List[str]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__ : Dict=(1_0, 3, 3, 3, 3, 2, 2) , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[Any]=1_2_8 , UpperCamelCase__ : Optional[int]=1_6 , UpperCamelCase__ : Optional[Any]=3_2_0 , UpperCamelCase__ : Any=8_0_0 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=0.05 , UpperCamelCase__ : Optional[Any]=1_0 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Tuple=1_0 , UpperCamelCase__ : Optional[int]=3_2_0 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=1_0_0 , UpperCamelCase__ : Dict=2_5_6 , UpperCamelCase__ : Optional[int]=2_5_6 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Tuple="mean" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Union[str, Any]=2_5_6 , UpperCamelCase__ : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCamelCase__ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCamelCase__ : Any=(1, 2, 3, 1, 1) , UpperCamelCase__ : Dict=5_1_2 , UpperCamelCase__ : str=8_0 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : str=False , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : 
    Optional[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : List[str] , ):
        '''
        Store all hyper-parameters and validate the convolutional layout;
        token-id and extra keyword arguments are forwarded to the base config.
        '''
        super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__)
        # NOTE(review): each assignment below originally targeted the matching
        # ``self.<name>`` attribute; the right-hand names are the parameters
        # that the garbled signature above no longer binds.
        snake_case__ = hidden_size
        snake_case__ = feat_extract_norm
        snake_case__ = feat_extract_activation
        # Convolutional feature-extractor layout (dims / strides / kernels).
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = conv_bias
        # Relative-position bucket settings for WavLM's gated attention.
        snake_case__ = num_buckets
        snake_case__ = max_bucket_distance
        snake_case__ = num_conv_pos_embeddings
        snake_case__ = num_conv_pos_embedding_groups
        snake_case__ = len(self.conv_dim)
        # Transformer encoder sizes.
        snake_case__ = num_hidden_layers
        snake_case__ = intermediate_size
        snake_case__ = hidden_act
        snake_case__ = num_attention_heads
        # Dropout / regularization rates.
        snake_case__ = hidden_dropout
        snake_case__ = attention_dropout
        snake_case__ = activation_dropout
        snake_case__ = feat_proj_dropout
        snake_case__ = final_dropout
        snake_case__ = layerdrop
        snake_case__ = layer_norm_eps
        snake_case__ = initializer_range
        snake_case__ = num_ctc_classes
        snake_case__ = vocab_size
        snake_case__ = do_stable_layer_norm
        snake_case__ = use_weighted_layer_sum
        snake_case__ = classifier_proj_size
        # The three convolutional layout lists must describe the same number
        # of feature-extractor layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        snake_case__ = apply_spec_augment
        snake_case__ = mask_time_prob
        snake_case__ = mask_time_length
        snake_case__ = mask_time_min_masks
        snake_case__ = mask_feature_prob
        snake_case__ = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        snake_case__ = num_codevectors_per_group
        snake_case__ = num_codevector_groups
        snake_case__ = contrastive_logits_temperature
        snake_case__ = num_negatives
        snake_case__ = codevector_dim
        snake_case__ = proj_codevector_dim
        snake_case__ = diversity_loss_weight
        # ctc loss
        snake_case__ = ctc_loss_reduction
        snake_case__ = ctc_zero_infinity
        # adapter
        snake_case__ = add_adapter
        snake_case__ = adapter_kernel_size
        snake_case__ = adapter_stride
        snake_case__ = num_adapter_layers
        snake_case__ = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        snake_case__ = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = xvector_output_dim
    @property
    def __magic_name__ ( self : Optional[int]):
        '''Total downsampling stride of the feature extractor (product of all
        convolutional strides).'''
        return functools.reduce(operator.mul , self.conv_stride , 1)
| 654 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
a__ = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
a__ = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
a__ = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
    """chrF / chrF++ machine-translation metric, backed by sacrebleu's CHRF.

    NOTE(review): both methods below carry the same collapsed name, so only
    the second `def` survives at runtime; upstream names are `_info` and
    `_compute` — confirm before relying on this class.
    """
    def __magic_name__ ( self : int):
        """Check the sacrebleu version and describe the metric's interface."""
        if version.parse(scb.__version__) < version.parse("""1.4.12"""):
            raise ImportWarning(
                """To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
                """You can install it with `pip install \"sacrebleu>=1.4.12\"`.""")
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence"""),
                    """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""") , id="""references"""),
                }) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
                """https://github.com/m-popovic/chrF""",
            ] , )
    def __magic_name__ ( self : int , predictions : Optional[int] , references : int , char_order : int = CHRF.CHAR_ORDER , word_order : int = CHRF.WORD_ORDER , beta : int = CHRF.BETA , lowercase : bool = False , whitespace : bool = False , eps_smoothing : bool = False , ):
        """Compute the corpus chrF(++) score for `predictions` against
        `references` (one reference sub-list per prediction).

        Fix: the original signature reused one placeholder name for every
        parameter (a SyntaxError) and every local shared one name; the names
        are restored from the documented argument list of this metric.
        """
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""")
        # sacrebleu expects one list per reference *position*, not per example.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing)
        output = sb_chrf.corpus_score(predictions , transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 654 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( lowercase_ ):
    """
    Unconditional image-generation pipeline for score-based generative models
    with variance-exploding SDE sampling: alternates Langevin correction
    steps with predictor steps of the reverse-time SDE.
    """
    # Expected module types (annotation-only; instances are registered in __init__).
    _lowercase : UNetaDModel
    _lowercase : ScoreSdeVeScheduler
    def __init__( self : Union[str, Any] , unet : UNetaDModel , scheduler : ScoreSdeVeScheduler):
        """Register the denoising U-Net and the SDE-VE scheduler.

        Fix: the original declared both parameters with the same placeholder
        name (a SyntaxError); names are restored here.
        """
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler)
    @torch.no_grad()
    def __call__( self : Union[str, Any] , batch_size : int = 1 , num_inference_steps : int = 2_0_0_0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs : List[str] , ):
        """Sample `batch_size` images by integrating the reverse-time SDE.

        Fix: every local was collapsed onto one placeholder name, so later
        reads (`batch_size`, `img_size`, `shape`, ...) were unbound; locals
        are restored following the upstream diffusers ScoreSdeVePipeline.

        Returns an ImagePipelineOutput, or a 1-tuple when return_dict=False.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # Start from pure noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape , generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device)
            # correction step (Langevin dynamics)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample , sigma_t).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator).prev_sample
            # prediction step (reverse-time SDE step)
            model_output = model(sample , sigma_t).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        # Return the noise-free mean of the final step as the image.
        sample = sample_mean.clamp(0 , 1)
        sample = sample.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
| 654 | 1 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def _UpperCAmelCase ( a : Any ):
return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def _UpperCAmelCase ( ):
    """Entry point of the ``datasets-cli`` tool: build the argument parser,
    register all sub-commands, dispatch to the chosen command and run it.

    Fix: locals were collapsed onto one placeholder name (so `args`,
    `unknown_args` and the parsed kwargs were unbound) and `allow_abbrev`
    referenced an undefined name; restored following the upstream CLI.
    """
    parser = ArgumentParser(
        """HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help="""datasets-cli command helpers""" )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Tokens not claimed by any sub-command are forwarded as keyword args.
    # (Inlined `--flag value` pairing — same transformation as the helper
    # above, whose module-level name is shadowed by this function here.)
    kwargs = {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
    # CLI entry point.
    # NOTE(review): `main` is not bound under that name in this file — the
    # CLI driver above is named `_UpperCAmelCase`; confirm against upstream.
    main()
| 654 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """Fast tests for ``IFInpaintingSuperResolutionPipeline`` built on tiny
    dummy components; the mixins supply the shared pipeline test machinery."""
    # Pipeline class under test.
    _lowercase : Optional[int] = IFInpaintingSuperResolutionPipeline
    # The super-resolution stage has a fixed scale, so width/height are not
    # user-facing call parameters.
    _lowercase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    # Batched tests additionally need the low-res `original_image` input.
    _lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATch_PARAMS.union({'''original_image'''} ) if False else TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
    # `latents` is not an optional parameter of this pipeline.
    _lowercase : int = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def __magic_name__ ( self : Union[str, Any]):
        '''Return tiny dummy model components for the super-resolution stage.'''
        return self._get_superresolution_dummy_components()
    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int]=0):
        '''Build deterministic dummy call inputs on the requested device; the
        second argument seeds the RNG.'''
        if str(UpperCamelCase__).startswith("""mps"""):
            snake_case__ = torch.manual_seed(UpperCamelCase__)
        else:
            snake_case__ = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__)
        # NOTE(review): the locals here were collapsed onto one placeholder
        # name by a rename pass; the dict below reads `image`,
        # `original_image`, `mask_image`, `generator`, which this garbled
        # code never binds — repair against upstream before running.
        snake_case__ = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        snake_case__ = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def __magic_name__ ( self : Dict):
        '''xformers attention must match default attention within tolerance.'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
    def __magic_name__ ( self : int):
        '''Pipeline must round-trip save/load with optional components.'''
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
    def __magic_name__ ( self : Optional[Any]):
        '''Saving/loading in float16 must stay within tolerance.'''
        super().test_save_load_floataa(expected_max_diff=1E-1)
    def __magic_name__ ( self : List[Any]):
        '''Attention slicing must not change outputs beyond tolerance.'''
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
    def __magic_name__ ( self : Union[str, Any]):
        '''Pipeline must round-trip a local save/load.'''
        self._test_save_load_local()
    def __magic_name__ ( self : str):
        '''Batched and single inference must produce near-identical results.'''
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 654 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the LLaMA sub-package: heavy submodules are only
# imported on first access (or eagerly under TYPE_CHECKING).
# Fix: every optional export list was previously rebound to one placeholder
# name (losing the dict), and the final _LazyModule call referenced an
# `_import_structure` that was never defined; restored to the upstream
# transformers lazy-init pattern.
_import_structure = {
    """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_llama"""] = ["""LlamaTokenizer"""]
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_llama_fast"""] = ["""LlamaTokenizerFast"""]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_llama"""] = [
        """LlamaForCausalLM""",
        """LlamaModel""",
        """LlamaPreTrainedModel""",
        """LlamaForSequenceClassification""",
    ]
if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
    import sys
    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 654 |
# Digit pools used by the reversible-number search below.
# Fix: both lists were previously bound to the same placeholder name, so the
# `EVEN_DIGITS` / `ODD_DIGITS` reads in the search were NameErrors.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def _UpperCAmelCase ( a : int , a : int , a : list[int] , a : int ):
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
snake_case__ = 0
for digit in range(10 ):
snake_case__ = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , a , a )
return result
snake_case__ = 0
for digita in range(10 ):
snake_case__ = digita
if (remainder + digita) % 2 == 0:
snake_case__ = ODD_DIGITS
else:
snake_case__ = EVEN_DIGITS
for digita in other_parity_digits:
snake_case__ = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , a , a , )
return result
def _UpperCAmelCase ( max_power : int = 9 ) -> int:
    """Project Euler 145: count reversible numbers below 10**max_power.

    Fix: the original parameter was a placeholder never read by the body
    (which used `max_power`), and the helper call passed that unbound name;
    the loop's `length` is now passed for both positional slots as the
    reference solution does.
    """
    result = 0
    for length in range(1 , max_power + 1 ):
        # NOTE(review): `reversible_numbers` is the counting helper defined
        # just above (its def site carries a collapsed name in this file) —
        # confirm the binding against upstream.
        result += reversible_numbers(length , 0 , [0] * length , length )
    return result
if __name__ == "__main__":
    # Print the answer for the default limit (numbers below 10**9).
    # NOTE(review): `solution` is not bound under that name in this file —
    # the driver above is named `_UpperCAmelCase`; confirm against upstream.
    print(F'''{solution() = }''')
| 654 | 1 |
from ...configuration_utils import PretrainedConfig
class _lowerCAmelCase ( lowercase_ ):
    """Configuration class for BertGeneration encoder/decoder models
    (``model_type = "bert-generation"``); defaults describe the
    ``google/bert_for_seq_generation_L-24_bbc_encoder`` architecture.

    Fix: the original ``__init__`` declared every parameter with the same
    placeholder name (a SyntaxError) while the body read descriptive names,
    and the assignments lost their ``self.`` targets. The 16 defaults match
    upstream ``transformers.BertGenerationConfig`` one-for-one, so the names
    are restored from that signature.
    """
    # Model-type identifier used by the auto-config machinery.
    _lowercase : Any = '''bert-generation'''
    def __init__( self : Any , vocab_size : Optional[Any]=5_0_3_5_8 , hidden_size : Tuple=1_0_2_4 , num_hidden_layers : List[Any]=2_4 , num_attention_heads : Optional[Any]=1_6 , intermediate_size : Tuple=4_0_9_6 , hidden_act : Optional[Any]="gelu" , hidden_dropout_prob : List[str]=0.1 , attention_probs_dropout_prob : Optional[int]=0.1 , max_position_embeddings : List[Any]=5_1_2 , initializer_range : Optional[int]=0.02 , layer_norm_eps : Union[str, Any]=1E-12 , pad_token_id : List[Any]=0 , bos_token_id : int=2 , eos_token_id : int=1 , position_embedding_type : str="absolute" , use_cache : Tuple=True , **kwargs : Union[str, Any] , ):
        '''Store the architecture hyper-parameters; token ids and unknown
        keyword arguments are forwarded to the base config.'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 654 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
a__ = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class _lowerCAmelCase ( lowercase_ ):
    """Translation tool backed by ``facebook/nllb-200-distilled-600M``:
    maps plain-English language names to NLLB codes, tokenizes, generates
    and decodes.

    NOTE(review): the three methods below all carry the same collapsed name,
    so only the last ``def`` survives at runtime; upstream names are
    ``encode`` / ``forward`` / ``decode`` — confirm before use. The
    ``LANGUAGE_CODES`` table referenced here is the name-to-code dict defined
    above this class (its binding may also be collapsed in this file).
    """
    _lowercase : List[str] = '''facebook/nllb-200-distilled-600M'''
    _lowercase : List[Any] = (
        '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
        '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '''
        '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
    )
    _lowercase : Optional[int] = '''translator'''
    _lowercase : Optional[Any] = AutoTokenizer
    _lowercase : Dict = AutoModelForSeqaSeqLM
    _lowercase : List[str] = LANGUAGE_CODES
    _lowercase : Optional[Any] = ['''text''', '''text''', '''text''']
    _lowercase : Tuple = ['''text''']
    def __magic_name__ ( self : Any , text : str , src_lang : Optional[Any] , tgt_lang : int):
        """Validate the two language names, map them to NLLB codes and
        tokenize `text` for translation.

        Fix: the original declared all parameters with one placeholder name
        (a SyntaxError) and let the target-language lookup clobber the
        source-language one; restored from the upstream tool.
        """
        if src_lang not in self.lang_to_code:
            raise ValueError(F'''{src_lang} is not a supported language.''')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'''{tgt_lang} is not a supported language.''')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors="""pt""" , src_lang=src_lang , tgt_lang=tgt_lang)
    def __magic_name__ ( self : Dict , inputs : Dict):
        '''Run generation on the encoded inputs.'''
        return self.model.generate(**inputs)
    def __magic_name__ ( self : List[str] , outputs : Dict):
        '''Decode the generated token ids back into plain text.'''
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True)
| 654 | 1 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
"""simple docstring"""
@staticmethod
def __magic_name__ ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
pass
@is_pipeline_test
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for the `visual-question-answering` pipeline (tiny random ViLT for
    fast checks, full ViLT-b32 for the slow quality checks)."""
    # mapping of model classes eligible for this pipeline task
    _lowercase : Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def __magic_name__ ( self : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any]):
        '''Build a tiny VQA pipeline plus two example inputs (PIL image and file-path forms).'''
        snake_case__ = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""")
        snake_case__ = [
            {
                """image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png"""),
                """question""": """How many cats are there?""",
            },
            {
                """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
                """question""": """How many cats are there?""",
            },
        ]
        return vqa_pipeline, examples
    def __magic_name__ ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]):
        '''Each example must yield a single {score, answer} candidate at top_k=1.'''
        snake_case__ = vqa_pipeline(UpperCamelCase__ , top_k=1)
        self.assertEqual(
            UpperCamelCase__ , [
                [{"""score""": ANY(UpperCamelCase__), """answer""": ANY(UpperCamelCase__)}],
                [{"""score""": ANY(UpperCamelCase__), """answer""": ANY(UpperCamelCase__)}],
            ] , )
    @require_torch
    def __magic_name__ ( self : List[str]):
        '''Image may be given as a path or inside a dict; top_k=2 yields two candidates.'''
        snake_case__ = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""")
        snake_case__ = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        snake_case__ = """How many cats are there?"""
        snake_case__ = vqa_pipeline(image=UpperCamelCase__ , question="""How many cats are there?""" , top_k=2)
        self.assertEqual(
            UpperCamelCase__ , [{"""score""": ANY(UpperCamelCase__), """answer""": ANY(UpperCamelCase__)}, {"""score""": ANY(UpperCamelCase__), """answer""": ANY(UpperCamelCase__)}])
        snake_case__ = vqa_pipeline({"""image""": image, """question""": question} , top_k=2)
        self.assertEqual(
            UpperCamelCase__ , [{"""score""": ANY(UpperCamelCase__), """answer""": ANY(UpperCamelCase__)}, {"""score""": ANY(UpperCamelCase__), """answer""": ANY(UpperCamelCase__)}])
    @slow
    @require_torch
    def __magic_name__ ( self : List[str]):
        '''Full-size ViLT must reproduce the known scores/answers, including batched input.'''
        snake_case__ = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""")
        snake_case__ = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        snake_case__ = """How many cats are there?"""
        snake_case__ = vqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2)
        self.assertEqual(
            nested_simplify(UpperCamelCase__ , decimals=4) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}])
        snake_case__ = vqa_pipeline({"""image""": image, """question""": question} , top_k=2)
        self.assertEqual(
            nested_simplify(UpperCamelCase__ , decimals=4) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}])
        snake_case__ = vqa_pipeline(
            [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2)
        self.assertEqual(
            nested_simplify(UpperCamelCase__ , decimals=4) , [[{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}]] * 2 , )
    @require_tf
    @unittest.skip("""Visual question answering not implemented in TF""")
    def __magic_name__ ( self : Tuple):
        '''Placeholder: the VQA pipeline has no TensorFlow implementation.'''
        pass
| 654 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _UpperCAmelCase ( a : Optional[int] ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : int , UpperCamelCase__ : nn.Module , UpperCamelCase__ : int):
'''simple docstring'''
super().__init__()
snake_case__ = module
snake_case__ = nn.Sequential(
nn.Linear(module.in_features , UpperCamelCase__ , bias=UpperCamelCase__) , nn.Linear(UpperCamelCase__ , module.out_features , bias=UpperCamelCase__) , )
snake_case__ = (2.0 / (5 * min(module.in_features , module.out_features))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase__)
nn.init.zeros_(self.adapter[1].weight)
self.adapter.to(module.weight.device)
def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str):
'''simple docstring'''
return self.module(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__) + self.adapter(UpperCamelCase__)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """Shared fixture for the bitsandbytes quantization tests: model id,
    expected fp16/quantized memory ratio, prompt and accepted generations.

    NOTE(review): the attributes were mangled to ``_lowercase`` and the
    ``EXPECTED_OUTPUTS.add`` calls reference a name this class body never
    binds — confirm against the original transformers test file.
    """
    _lowercase : Dict = '''bigscience/bloom-1b7'''
    # Constant values
    _lowercase : Any = 2.109_6595_5269_2574
    _lowercase : Tuple = '''Hello my name is'''
    _lowercase : List[Any] = set()
    EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
    _lowercase : List[str] = 10
    def __magic_name__ ( self : Optional[int]):
        '''Load the tokenizer for the test model.'''
        snake_case__ = AutoTokenizer.from_pretrained(self.model_name)
class _lowerCAmelCase ( lowercase_ ):
    """Core quantization tests: config round-trip, memory footprint, weight
    dtypes, generation quality, and unsupported-operation errors."""
    def __magic_name__ ( self : str):
        '''Load an fp16 baseline and a quantized copy of the model.'''
        super().setUp()
        # Models and tokenizer
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="""auto""")
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
    def __magic_name__ ( self : Tuple):
        '''Free both models and release cached GPU memory between tests.'''
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : str):
        '''Quantized model config must expose and serialize `quantization_config`.'''
        snake_case__ = self.model_abit.config
        self.assertTrue(hasattr(UpperCamelCase__ , """quantization_config"""))
        snake_case__ = config.to_dict()
        snake_case__ = config.to_diff_dict()
        snake_case__ = config.to_json_string()
    def __magic_name__ ( self : Dict):
        '''Quantization must shrink memory by the expected ratio and pack weights as bnb params.'''
        from bitsandbytes.nn import Paramsabit
        snake_case__ = self.model_fpaa.get_memory_footprint()
        snake_case__ = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE)
        snake_case__ = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Paramsabit)
    def __magic_name__ ( self : Optional[int]):
        '''All non-excluded Linear weights must be stored as packed uint8.'''
        from transformers import TaPreTrainedModel
        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(UpperCamelCase__ , torch.nn.Linear):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta)
    def __magic_name__ ( self : Dict):
        '''Generation from the quantized model must produce a known-good output.'''
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
    def __magic_name__ ( self : str):
        '''Loading via an explicit BitsAndBytesConfig must generate correctly too.'''
        snake_case__ = BitsAndBytesConfig()
        snake_case__ = True
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = model_abit_from_config.generate(
            input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
    def __magic_name__ ( self : Optional[int]):
        '''Saving a quantized model is unsupported and must raise.'''
        with self.assertRaises(UpperCamelCase__), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(UpperCamelCase__)
    def __magic_name__ ( self : List[str]):
        '''Passing both a quantization config and a load_in_*bit flag must raise.'''
        snake_case__ = BitsAndBytesConfig()
        with self.assertRaises(UpperCamelCase__):
            snake_case__ = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
    def __magic_name__ ( self : List[Any]):
        '''Casting/moving the quantized model must raise; the fp16 model stays movable.'''
        with self.assertRaises(UpperCamelCase__):
            # Tries with `str`
            self.model_abit.to("""cpu""")
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa)
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.to(torch.device("""cuda:0"""))
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = self.model_fpaa.to(torch.floataa)
        snake_case__ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.to("""cpu""")
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.half()
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.float()
    def __magic_name__ ( self : Dict):
        '''Modules kept in fp32 (e.g. T5's `wo`) must stay fp32 after quantized load.'''
        snake_case__ = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """Quantization tests specific to T5 (encoder-decoder) models."""
    @classmethod
    def __magic_name__ ( cls : Optional[Any]):
        '''Cache model names, tokenizer and a shared prompt for all tests.'''
        snake_case__ = """t5-small"""
        snake_case__ = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
        snake_case__ = AutoTokenizer.from_pretrained(cls.model_name)
        snake_case__ = """Translate in German: Hello, my dog is cute"""
    def __magic_name__ ( self : Optional[int]):
        '''Release cached GPU memory between tests.'''
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : Any):
        '''T5 must load and generate quantized even with `_keep_in_fp32_modules` disabled.'''
        from transformers import TaForConditionalGeneration
        snake_case__ = TaForConditionalGeneration._keep_in_fpaa_modules
        snake_case__ = None
        # test with `t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        # test with `flan-t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        snake_case__ = modules
    def __magic_name__ ( self : Union[str, Any]):
        '''Decoder attention projections must be bnb Linear modules after quantized load and still generate.'''
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration
        # test with `t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit))
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        # test with `flan-t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
class _lowerCAmelCase ( lowercase_ ):
    """Checks which sub-modules get quantized across different model head types."""
    def __magic_name__ ( self : int):
        '''Load base, sequence-classification, causal-LM and seq2seq models quantized.'''
        super().setUp()
        # model_name
        snake_case__ = """bigscience/bloom-560m"""
        snake_case__ = """t5-small"""
        # Different types of model
        snake_case__ = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # Sequence classification model
        snake_case__ = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # CausalLM model
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # Seq2seq model
        snake_case__ = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
    def __magic_name__ ( self : List[str]):
        '''Free all loaded models and release cached GPU memory.'''
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : Union[str, Any]):
        '''Transformer weights are quantized; output heads must stay plain nn.Parameter.'''
        from bitsandbytes.nn import Paramsabit
        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class _lowerCAmelCase ( lowercase_ ):
    """Quantized loading through the high-level `pipeline()` API."""
    def __magic_name__ ( self : Tuple):
        '''Reuse the shared fixture setup; the pipeline itself is built in the test.'''
        super().setUp()
    def __magic_name__ ( self : int):
        '''Drop the pipeline and release cached GPU memory.'''
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : Tuple):
        '''A text-generation pipeline loaded with load_in_4bit must produce an accepted output.'''
        snake_case__ = pipeline(
            """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        snake_case__ = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _lowerCAmelCase ( lowercase_ ):
    """Quantized loading with a balanced device map across multiple GPUs."""
    def __magic_name__ ( self : Union[str, Any]):
        '''Reuse the shared fixture setup.'''
        super().setUp()
    def __magic_name__ ( self : int):
        '''The model must shard over both GPUs and still generate an accepted output.'''
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase__ , device_map="""balanced""")
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
        # Check that inference pass works on the model
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        # Second real batch
        snake_case__ = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
class _lowerCAmelCase ( lowercase_ ):
    """Training smoke test: LoRA adapters on top of a frozen quantized model."""
    def __magic_name__ ( self : Any):
        '''Use OPT-350m for the training test, then run the shared setup.'''
        snake_case__ = """facebook/opt-350m"""
        super().setUp()
    def __magic_name__ ( self : Any):
        '''Adapters must receive gradients while frozen embeddings do not.'''
        if version.parse(importlib.metadata.version("""bitsandbytes""")) < version.parse("""0.37.0"""):
            return
        # Step 1: freeze all parameters
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__)
        self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
        for param in model.parameters():
            snake_case__ = False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                snake_case__ = param.data.to(torch.floataa)
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(UpperCamelCase__)):
                snake_case__ = LoRALayer(module.q_proj , rank=1_6)
                snake_case__ = LoRALayer(module.k_proj , rank=1_6)
                snake_case__ = LoRALayer(module.v_proj , rank=1_6)
        # Step 3: dummy batch
        snake_case__ = self.tokenizer("""Test batch """ , return_tensors="""pt""").to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            snake_case__ = model.forward(**UpperCamelCase__)
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(UpperCamelCase__ , UpperCamelCase__):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(UpperCamelCase__ , nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class _lowerCAmelCase ( lowercase_ ):
    """Same quantization test-suite, parameterized for GPT-2 XL (its expected
    fp16/quantized memory ratio differs from Bloom's)."""
    _lowercase : List[Any] = '''gpt2-xl'''
    _lowercase : Any = 3.3191_8548_5415_2187
| 654 | 1 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
a__ = (
"""This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"""
)
def _UpperCAmelCase ( preds , labels ):
    """Deprecated GLUE helper: fraction of ``preds`` equal to ``labels``.

    Fixes the original's duplicate parameter names (a SyntaxError) and the
    undefined ``preds``/``labels`` references in the body.
    """
    # NOTE(review): the warn arguments were mangled; `a__` is the module-level
    # deprecation message defined above — confirm the warning category.
    warnings.warn(a__ , FutureWarning )
    requires_backends(_UpperCAmelCase , """sklearn""" )
    return (preds == labels).mean()
def _UpperCAmelCase ( preds , labels ):
    """Deprecated GLUE helper: accuracy, F1 and their average ("acc_and_f1").

    Fixes the original's duplicate parameter names (SyntaxError) and the
    undefined ``acc``/``fa`` locals; ``simple_accuracy`` refers to the helper
    above (its name was collapsed by the mangling — verify).
    """
    warnings.warn(a__ , FutureWarning )
    requires_backends(_UpperCAmelCase , """sklearn""" )
    acc = simple_accuracy(preds , labels )
    fa = fa_score(y_true=labels , y_pred=preds )
    return {
        "acc": acc,
        "f1": fa,
        "acc_and_f1": (acc + fa) / 2,
    }
def _UpperCAmelCase ( preds , labels ):
    """Deprecated GLUE helper: Pearson and Spearman correlations and their mean.

    Fixes the original's duplicate parameter names (SyntaxError) and the
    undefined correlation locals.
    """
    warnings.warn(a__ , FutureWarning )
    requires_backends(_UpperCAmelCase , """sklearn""" )
    pearson_corr = pearsonr(preds , labels )[0]
    spearman_corr = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def _UpperCAmelCase ( task_name , preds , labels ):
    """Deprecated GLUE metric dispatcher: compute the metric(s) for *task_name*.

    Raises:
        ValueError: if ``preds`` and ``labels`` differ in length.
        KeyError: for an unknown ``task_name``.

    Fixes the original's duplicate parameter names (SyntaxError); uses an
    explicit exception instead of ``assert`` (stripped under ``python -O``),
    matching the xnli sibling below. The helper names (simple_accuracy,
    acc_and_fa, pearson_and_spearman) refer to the functions above, whose
    names the mangling collapsed — verify.
    """
    warnings.warn(a__ , FutureWarning )
    requires_backends(_UpperCAmelCase , """sklearn""" )
    if len(preds ) != len(labels ):
        raise ValueError(F'''Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}''' )
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "mrpc":
        return acc_and_fa(preds , labels )
    elif task_name == "sts-b":
        return pearson_and_spearman(preds , labels )
    elif task_name == "qqp":
        return acc_and_fa(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def _UpperCAmelCase ( task_name , preds , labels ):
    """Deprecated XNLI metric dispatcher: accuracy for the "xnli" task only.

    Raises ValueError on length mismatch and KeyError for unknown tasks.
    Fixes the original's duplicate parameter names (SyntaxError).
    """
    warnings.warn(a__ , FutureWarning )
    requires_backends(_UpperCAmelCase , """sklearn""" )
    if len(preds ) != len(labels ):
        raise ValueError(F'''Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}''' )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
| 654 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
a__ = """"""
a__ = """"""
a__ = """"""
a__ = 1 # (0 is vertical, 1 is horizontal)
def _UpperCAmelCase ( ):
    """Flip every dataset image and write augmented JPEGs plus YOLO label files.

    NOTE(review): this body is mangled — ``get_dataset(a , a )`` and
    ``update_image_and_anno(a , a , a )`` pass an undefined name ``a`` (the
    original presumably passed the LABEL_DIR/IMG_DIR/FLIP_TYPE constants),
    and the helpers below were all renamed to ``_UpperCAmelCase`` so these
    calls cannot resolve. Left byte-identical; confirm against the original.
    """
    snake_case__ , snake_case__ = get_dataset(a , a )
    print("""Processing...""" )
    snake_case__ , snake_case__ , snake_case__ = update_image_and_anno(a , a , a )
    for index, image in enumerate(a ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        snake_case__ = random_chars(32 )
        snake_case__ = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        snake_case__ = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        # presumably the image and its label file share the random-coded stem
        cva.imwrite(F'''/{file_root}.jpg''' , a , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F'''Success {index+1}/{len(a )} with {file_name}''' )
        snake_case__ = []
        for anno in new_annos[index]:
            snake_case__ = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(a )
        with open(F'''/{file_root}.txt''' , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def _UpperCAmelCase ( a : str , a : str ):
snake_case__ = []
snake_case__ = []
for label_file in glob.glob(os.path.join(a , """*.txt""" ) ):
snake_case__ = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(a ) as in_file:
snake_case__ = in_file.readlines()
snake_case__ = os.path.join(a , F'''{label_name}.jpg''' )
snake_case__ = []
for obj_list in obj_lists:
snake_case__ = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(a )
labels.append(a )
return img_paths, labels
def _UpperCAmelCase ( img_list : list , anno_list : list , flip_type : int = 1 ):
    """Flip every image and its YOLO boxes horizontally (1) or vertically (0).

    Returns ``(new_imgs_list, new_annos_lists, path_list)`` — flipped cv2
    images, their adjusted box lists, and the source image paths.

    Fixes the original's duplicate parameter names (a SyntaxError).
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cva.imread(path )
        if flip_type == 1:
            # horizontal flip: mirror the x center, keep everything else
            img = cva.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            # vertical flip: mirror the y center, keep everything else
            img = cva.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(img )
    return new_imgs_list, new_annos_lists, path_list
def _UpperCAmelCase ( a : int = 32 ):
assert number_char > 1, "The number of character should greater than 1"
snake_case__ = ascii_lowercase + digits
return "".join(random.choice(a ) for _ in range(a ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 654 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class _lowerCAmelCase ( lowercase_ ):
    """Configuration class for LXMERT models.

    Stores the text-encoder sizes, the cross-modality layer counts
    (language / cross-encoder / vision) and the pre-training task switches.

    Fixes the original ``__init__``, whose parameters were all mangled to the
    same name (``UpperCamelCase__`` — a SyntaxError) while the body referenced
    the real names; parameter names are restored from the body's assignment
    order, defaults from the original signature.
    """
    _lowercase : Optional[Any] = '''lxmert'''
    _lowercase : List[str] = {}
    def __init__(
        self ,
        vocab_size=3_0_5_2_2 ,
        hidden_size=7_6_8 ,
        num_attention_heads=1_2 ,
        num_qa_labels=9_5_0_0 ,
        num_object_labels=1_6_0_0 ,
        num_attr_labels=4_0_0 ,
        intermediate_size=3_0_7_2 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=5_1_2 ,
        type_vocab_size=2 ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-12 ,
        l_layers=9 ,
        x_layers=5 ,
        r_layers=5 ,
        visual_feat_dim=2_0_4_8 ,
        visual_pos_dim=4 ,
        visual_loss_normalizer=6.67 ,
        task_matched=True ,
        task_mask_lm=True ,
        task_obj_predict=True ,
        task_qa=True ,
        visual_obj_loss=True ,
        visual_attr_loss=True ,
        visual_feat_loss=True ,
        **kwargs ,
    ):
        '''Store all arguments on the config; extra kwargs go to the base class.'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # layer counts keyed per modality stream, as the LXMERT code expects
        self.num_hidden_layers = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers}
        super().__init__(**kwargs)
| 654 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
a__ = 5_0_0_0_0_0
a__ , a__ = os.path.split(__file__)
a__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def _UpperCAmelCase ( dataset : datasets.Dataset , **kwargs ):
    """Time `Dataset.map` with the given kwargs (duration measured by @get_duration).

    Fixes the original's duplicate parameter name (`a` used for both the
    positional and the **-parameter — a SyntaxError) and the undefined
    ``dataset`` reference.
    """
    _ = dataset.map(**kwargs )
@get_duration
def _UpperCAmelCase ( dataset : datasets.Dataset , **kwargs ):
    """Time `Dataset.filter` with the given kwargs (duration measured by @get_duration).

    Fixes the original's duplicate parameter name (`a` used for both the
    positional and the **-parameter — a SyntaxError) and the undefined
    ``dataset`` reference.
    """
    _ = dataset.filter(**kwargs )
def _UpperCAmelCase ( ):
    """Benchmark `Dataset.map`/`.filter` across output formats and write timings to JSON.

    NOTE(review): this body is mangled — the ``map(a)`` / ``filter(a)`` calls
    below resolve to the *builtins*, not the timed helpers above (which were
    all renamed to ``_UpperCAmelCase``), and ``a`` is undefined at call time.
    Left byte-identical; confirm against the original benchmark script.
    """
    snake_case__ = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        snake_case__ = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        snake_case__ = generate_example_dataset(
            os.path.join(a , """dataset.arrow""" ) , a , num_examples=a )
        snake_case__ = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=a )
        def tokenize(a : Union[str, Any] ):
            # presumably tokenizes the batch's "text" column — `examples` is undefined here (mangled)
            return tokenizer(examples["""text"""] )
        snake_case__ = map(a )
        snake_case__ = map(a , batched=a )
        snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""numpy""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""pandas""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        snake_case__ = map(a , function=a , batched=a )
        snake_case__ = filter(a )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(a , """wb""" ) as f:
            f.write(json.dumps(a ).encode("""utf-8""" ) )
| 654 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( lowercase_ ):
    """DiT class-conditional image generation pipeline (transformer denoiser +
    VAE decoder + scheduler), with ImageNet label-name → id lookup.

    NOTE(review): signatures below were mangled (duplicate ``UpperCamelCase__``
    parameter names are a SyntaxError) and locals collapsed to
    ``snake_case__`` — left byte-identical; confirm against diffusers' DiTPipeline.
    """
    def __init__( self : Tuple , UpperCamelCase__ : TransformeraDModel , UpperCamelCase__ : AutoencoderKL , UpperCamelCase__ : KarrasDiffusionSchedulers , UpperCamelCase__ : Optional[Dict[int, str]] = None , ):
        '''Register the transformer/vae/scheduler modules and build the sorted label→id map (if id2label given).'''
        super().__init__()
        self.register_modules(transformer=UpperCamelCase__ , vae=UpperCamelCase__ , scheduler=UpperCamelCase__)
        # create a imagenet -> id dictionary for easier use
        snake_case__ = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split(""","""):
                    snake_case__ = int(UpperCamelCase__)
            snake_case__ = dict(sorted(self.labels.items()))
    def __magic_name__ ( self : List[str] , UpperCamelCase__ : Union[str, List[str]]):
        '''Map label name(s) to ImageNet class ids; raises ValueError for unknown names.'''
        if not isinstance(UpperCamelCase__ , UpperCamelCase__):
            snake_case__ = list(UpperCamelCase__)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''')
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : float = 4.0 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : int = 5_0 , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , ):
        '''Denoising loop: sample latents, apply classifier-free guidance (null class 1000) when guidance_scale > 1, then VAE-decode to images.'''
        snake_case__ = len(UpperCamelCase__)
        snake_case__ = self.transformer.config.sample_size
        snake_case__ = self.transformer.config.in_channels
        snake_case__ = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=UpperCamelCase__ , device=self.device , dtype=self.transformer.dtype , )
        # duplicate latents so conditional and unconditional passes share one batch
        snake_case__ = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        snake_case__ = torch.tensor(UpperCamelCase__ , device=self.device).reshape(-1)
        # 1000 is the "null" class id used for the unconditional half
        snake_case__ = torch.tensor([1_0_0_0] * batch_size , device=self.device)
        snake_case__ = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(UpperCamelCase__)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                snake_case__ = latent_model_input[: len(UpperCamelCase__) // 2]
                snake_case__ = torch.cat([half, half] , dim=0)
            snake_case__ = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = t
            if not torch.is_tensor(UpperCamelCase__):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                snake_case__ = latent_model_input.device.type == """mps"""
                if isinstance(UpperCamelCase__ , UpperCamelCase__):
                    snake_case__ = torch.floataa if is_mps else torch.floataa
                else:
                    snake_case__ = torch.intaa if is_mps else torch.intaa
                snake_case__ = torch.tensor([timesteps] , dtype=UpperCamelCase__ , device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                snake_case__ = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            snake_case__ = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            snake_case__ = self.transformer(
                UpperCamelCase__ , timestep=UpperCamelCase__ , class_labels=UpperCamelCase__).sample
            # perform guidance
            if guidance_scale > 1:
                snake_case__ , snake_case__ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                snake_case__ , snake_case__ = torch.split(UpperCamelCase__ , len(UpperCamelCase__) // 2 , dim=0)
                snake_case__ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                snake_case__ = torch.cat([half_eps, half_eps] , dim=0)
                snake_case__ = torch.cat([eps, rest] , dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                snake_case__ , snake_case__ = torch.split(UpperCamelCase__ , UpperCamelCase__ , dim=1)
            else:
                snake_case__ = noise_pred
            # compute previous image: x_t -> x_t-1
            snake_case__ = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
        if guidance_scale > 1:
            snake_case__ , snake_case__ = latent_model_input.chunk(2 , dim=0)
        else:
            snake_case__ = latent_model_input
        snake_case__ = 1 / self.vae.config.scaling_factor * latents
        snake_case__ = self.vae.decode(UpperCamelCase__).sample
        snake_case__ = (samples / 2 + 0.5).clamp(0 , 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        snake_case__ = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
        if output_type == "pil":
            snake_case__ = self.numpy_to_pil(UpperCamelCase__)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=UpperCamelCase__)
| 654 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
# Emit INFO-level messages from the transformers logging utilities during conversion.
logging.set_verbosity_info()
# NOTE(review): obfuscated binding — the canonical script names this `logger`;
# it is unused in the remainder of this file.
a__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the (timm key -> HF key) rename table for a DeiT checkpoint.

    Args:
        config: a ``DeiTConfig``-like object; only ``num_hidden_layers`` is read.
        base_model: when True, the keys target a headless base model and the
            ``deit`` prefix is stripped from the embedding/encoder keys.

    Returns:
        list[tuple[str, str]]: source-key / destination-key pairs.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        # NOTE(review): [4:] leaves the leading "." from the "deit." prefix; kept as in the
        # original (only broken bindings were repaired here) — confirm before using base_model=True.
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate HF query/key/value entries.

    Mutates ``state_dict`` in place: pops ``blocks.{i}.attn.qkv.{weight,bias}`` and
    writes ``{prefix}encoder.layer.{i}.attention.attention.{query,key,value}.*``.

    Args:
        state_dict: the timm checkpoint state dict (modified in place).
        config: object exposing ``num_hidden_layers`` and ``hidden_size``.
        base_model: when True, keys are written without the ``deit.`` prefix.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place (the old key is removed)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO "two cats" verification image as a PIL image.

    Performs a network request; used only to sanity-check converted model outputs.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into the transformers design and save it.

    Args:
        deit_name: name of the timm DeiT model to convert
            (e.g. ``vit_deit_base_distilled_patch16_224``).
        pytorch_dump_folder_path: output directory for the converted model
            and image processor.

    Raises:
        AssertionError: if the converted model's logits do not match timm's.
    """
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # patch size and image size are encoded in the timm model name, e.g. ..._patch16_224
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass  # base config is the DeiTConfig default
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse the timm model name and output directory, then convert.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 654 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module logger (transformers logging wrapper).
logger = logging.get_logger(__name__)

# Names of the on-disk vocabulary files; referenced by the tokenizer class below.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# Hub URLs of the pretrained fast-tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

# Maximum input length per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class _lowerCAmelCase(PreTrainedTokenizerFast):
    """Fast (Rust-backed) byte-level BPE tokenizer for GPT-NeoX-20B.

    NOTE(review): the obfuscated original inherited an undefined name and
    declared duplicate class attributes / methods that shadowed each other;
    the canonical attribute and method names are restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the tokenizer model files to `save_directory` and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Encode a `Conversation`, appending EOS after each turn and keeping only
        the most recent `model_max_length` tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 654 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of `PriorTransformer.forward`.

    Attributes:
        predicted_image_embedding (`torch.FloatTensor`):
            The predicted CLIP image embedding conditioned on the text-embedding input.
    """

    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    """A prior transformer (unCLIP-style): predicts a CLIP image embedding from a
    noised embedding, a timestep, a projected conditioning embedding and optional
    encoder hidden states.

    NOTE(review): the obfuscated original had duplicate parameter names and
    destroyed attribute assignments; the canonical diffusers implementation is
    restored here with the original hyperparameter defaults.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        # Optional dimension overrides default to the natural sizes.
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            # learned "prd" token appended after all other embeddings
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        # Additive causal mask: -10000 above the diagonal, 0 elsewhere.
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        # Statistics used by post_process_latents to de-normalize predictions.
        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Return all attention processors in the model, keyed by their weight path."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Set the attention processor(s); accepts a single processor or a per-layer dict."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Reset every attention layer to the default AttnProcessor."""
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        """Run the prior.

        Returns a `PriorTransformerOutput` (or a 1-tuple when `return_dict=False`)
        containing the predicted image embedding.
        """
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            # Convert the boolean mask to an additive one and combine with the causal mask.
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            # The prd token (last position) carries the prediction.
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        """De-normalize prior latents using the stored CLIP mean/std statistics."""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 654 | 1 |
from __future__ import annotations
from math import pi, sqrt
def _UpperCAmelCase ( a : float , a : float ):
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 654 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# Checkpoints exercised by the tokenizer-equivalence tests below (referenced as
# TOKENIZER_CHECKPOINTS in setUp) and the small model used for SavedModel export.
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():

    class ModelToSave(tf.Module):
        """Bundle a TF tokenizer with a GPT-2 LM head so both export into one SavedModel."""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            # NOTE(review): the obfuscated original fed the tokenizer object to
            # AutoConfig.from_pretrained; the checkpoint name is the intended argument.
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            # Tokenize ragged string input, densify, and derive the attention mask
            # from the non-padding (non-zero) positions.
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()

            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase(unittest.TestCase):
    """Checks that the in-graph TF GPT-2 tokenizer matches the Python tokenizer
    and survives tf.function compilation, SavedModel export and config round-trips.

    NOTE(review): canonical unittest method names (setUp / test_*) are restored so
    the framework actually discovers and runs these tests.
    """

    def setUp(self):
        super().setUp()

        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            # Compiling through tf.function must not change tokenizer output.
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
| 654 | 1 |
# Canonical call-argument sets for the shared pipeline test mixins. The three
# TEXT_GUIDED_IMAGE_VARIATION_* / IMAGE_TO_IMAGE_IMAGE_PARAMS names are imported
# elsewhere in this package; the remaining names follow the same convention.
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
| 654 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : int = (IPNDMScheduler,)
_lowercase : int = (('''num_inference_steps''', 50),)
def __magic_name__ ( self : Any , **UpperCamelCase__ : Tuple):
'''simple docstring'''
snake_case__ = {"""num_train_timesteps""": 1_0_0_0}
config.update(**UpperCamelCase__)
return config
def __magic_name__ ( self : int , UpperCamelCase__ : Dict=0 , **UpperCamelCase__ : int):
'''simple docstring'''
snake_case__ = dict(self.forward_default_kwargs)
snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
snake_case__ = scheduler_class(**UpperCamelCase__)
scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residuals
snake_case__ = dummy_past_residuals[:]
if time_step is None:
snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__)
snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
new_scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residuals
snake_case__ = dummy_past_residuals[:]
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
pass
def __magic_name__ ( self : Tuple , UpperCamelCase__ : Union[str, Any]=0 , **UpperCamelCase__ : Tuple):
'''simple docstring'''
snake_case__ = dict(self.forward_default_kwargs)
snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**UpperCamelCase__)
scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residuals (must be after setting timesteps)
snake_case__ = dummy_past_residuals[:]
if time_step is None:
snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__)
snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residual (must be after setting timesteps)
snake_case__ = dummy_past_residuals[:]
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __magic_name__ ( self : Union[str, Any] , **UpperCamelCase__ : Dict):
'''simple docstring'''
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
snake_case__ = scheduler_class(**UpperCamelCase__)
snake_case__ = 1_0
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__)
for i, t in enumerate(scheduler.timesteps):
snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
for i, t in enumerate(scheduler.timesteps):
snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
return sample
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
snake_case__ = dict(self.forward_default_kwargs)
snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
for scheduler_class in self.scheduler_classes:
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**UpperCamelCase__)
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps"""):
scheduler.set_timesteps(UpperCamelCase__)
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps"""):
snake_case__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
snake_case__ = dummy_past_residuals[:]
snake_case__ = scheduler.timesteps[5]
snake_case__ = scheduler.timesteps[6]
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
    def __magic_name__ ( self : Union[str, Any]):
        '''Run ``check_over_configs`` for two ``num_train_timesteps`` settings.

        NOTE(review): the loop binds ``timesteps`` but the call passes
        ``UpperCamelCase__`` (unbound here) — looks like a mangled rewrite of
        ``timesteps``; verify against the upstream scheduler test.
        '''
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__)
    def __magic_name__ ( self : Dict):
        '''Run ``check_over_forward`` for paired (timestep, num_inference_steps) values.

        NOTE(review): the loop binds ``t`` and ``num_inference_steps`` but the
        call passes ``UpperCamelCase__`` (unbound here) — looks like a mangled
        rewrite; verify against the upstream scheduler test.
        '''
        for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0]):
            self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__)
    def __magic_name__ ( self : List[str]):
        '''Run the full denoising loop and check the mean absolute value of the
        result against a hard-coded expectation.

        NOTE(review): the loop result is bound to ``snake_case__`` while
        ``UpperCamelCase__`` and ``result_mean`` are read afterwards — mangled
        names; confirm the expected constant against the upstream test.
        '''
        snake_case__ = self.full_loop()
        snake_case__ = torch.mean(torch.abs(UpperCamelCase__))
        assert abs(result_mean.item() - 2_5_4_0_5_2_9) < 1_0
| 654 | 1 |
class TrieNode:
    """A node of a trie (prefix tree) over strings.

    Each node maps a character to a child ``TrieNode``; ``is_leaf`` marks that
    the path from the root to this node spells a complete inserted word.

    NOTE: the original block declared the class as ``_lowerCAmelCase`` with
    every method named ``__magic_name__`` (so later definitions clobbered
    earlier ones) and every assignment bound to ``snake_case__`` while the
    reads used the real names — the call sites below (``TrieNode()``,
    ``insert_many``, ``find``, ``delete``) pin down the names restored here.
    """

    def __init__(self) -> None:
        # Mapping from char to child TrieNode.
        self.nodes: dict[str, "TrieNode"] = {}
        # True when the path from the root to this node forms a complete word.
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert every word of *words* into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single *word* into the trie, creating nodes as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True iff *word* was previously inserted (exact match, not prefix)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Remove *word* from the trie, pruning branches that become empty."""

        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            # Returns True when *curr* became prunable (no children, not a leaf).
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: "TrieNode", word: str) -> None:
    """Recursively print every word stored beneath *node*, prefixed by *word*.

    NOTE: the original block defined four functions all named
    ``_UpperCAmelCase`` (each clobbering the previous) with parameters all
    named ``a``, while the bodies read ``node``, ``word``, ``passes`` etc. and
    the guard called an undefined ``main()`` — the names restored here are the
    ones the bodies themselves reference.
    """
    if node.is_leaf:
        print(word, end=""" """)
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    """Exercise insert/find/delete on a fixed word list; return True on success."""
    words = """banana bananas bandana band apple all beast""".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("""banana""")
    assert not root.find("""bandanas""")
    assert not root.find("""apps""")
    assert root.find("""apple""")
    assert root.find("""all""")
    root.delete("""all""")
    assert not root.find("""all""")
    root.delete("""banana""")
    assert not root.find("""banana""")
    assert root.find("""bananas""")
    return True


def print_results(msg: str, passes: bool) -> None:
    """Print a human-readable pass/fail line for *msg*."""
    print(str(msg), """works!""" if passes else """doesn't work :(""")


def pytests() -> None:
    """Assert-based entry point for test runners."""
    assert test_trie()


def main() -> None:
    """Run the trie self-test and report the result."""
    print_results("""Testing trie functionality""", test_trie())


if __name__ == "__main__":
    main()
| 654 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowerCAmelCase ( lowercase_ ):
    """Tool that produces a segmentation mask of an image for a text label.

    Wraps ``CLIPSegForImageSegmentation``: the encode step builds processor
    inputs from an (image, label) pair, the forward step runs the model under
    ``torch.no_grad()``, and the decode step converts the output into a PIL
    image.

    NOTE(review): every class attribute below is named ``_lowercase`` (later
    assignments clobber earlier ones) and several locals are bound to
    ``snake_case__`` while other names (``label``, ``image``, ``logits``,
    ``outputs``, ``array``) are read — presumably an automated rewrite of the
    original tool; confirm attribute names and ``np.uinta`` (likely
    ``np.uint8``) against the upstream source.
    """
    _lowercase : Optional[Any] = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    _lowercase : Dict = '''CIDAS/clipseg-rd64-refined'''
    _lowercase : List[Any] = '''image_segmenter'''
    _lowercase : Tuple = CLIPSegForImageSegmentation
    _lowercase : str = ['''image''', '''text''']
    _lowercase : Dict = ['''image''']
    def __init__( self : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any]):
        '''Require the vision backend before delegating to the base tool.'''
        requires_backends(self , ["""vision"""])
        super().__init__(*UpperCamelCase__ , **UpperCamelCase__)
    def __magic_name__ ( self : str , UpperCamelCase__ : "Image" , UpperCamelCase__ : str):
        '''Build padded processor tensors from the (image, label) pair.'''
        return self.pre_processor(text=[label] , images=[image] , padding=UpperCamelCase__ , return_tensors="""pt""")
    def __magic_name__ ( self : Any , UpperCamelCase__ : Optional[Any]):
        '''Run the segmentation model without gradients and return its logits.'''
        with torch.no_grad():
            snake_case__ = self.model(**UpperCamelCase__).logits
        return logits
    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any]):
        '''Convert model outputs to a PIL image.'''
        snake_case__ = outputs.cpu().detach().numpy()
        snake_case__ = 0
        snake_case__ = 1
        return Image.fromarray((array * 2_5_5).astype(np.uinta))
| 654 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( lowercase_ , unittest.TestCase ):
    """Pipeline test-suite for ``KandinskyVaaPriorPipeline``.

    Builds tiny dummy components (prior transformer, CLIP text/vision encoders,
    tokenizer, image processor, UnCLIP scheduler) and compares the pipeline's
    image embeddings against a hard-coded expected slice.

    NOTE(review): every class attribute below is named ``_lowercase`` and every
    method ``__magic_name__`` (later definitions clobber earlier ones), and
    many locals are bound to ``snake_case__`` while other names are read —
    presumably an automated rewrite of the upstream diffusers test; verify
    against the original before trusting any behaviour described here.
    """
    _lowercase : Union[str, Any] = KandinskyVaaPriorPipeline
    _lowercase : Union[str, Any] = ['''prompt''']
    _lowercase : List[str] = ['''prompt''', '''negative_prompt''']
    _lowercase : Optional[int] = [
        '''num_images_per_prompt''',
        '''generator''',
        '''num_inference_steps''',
        '''latents''',
        '''negative_prompt''',
        '''guidance_scale''',
        '''output_type''',
        '''return_dict''',
    ]
    _lowercase : str = False
    @property
    def __magic_name__ ( self : Union[str, Any]):
        '''Dimensionality constant (32) used by the dummy components.'''
        return 3_2
    @property
    def __magic_name__ ( self : str):
        '''Dimensionality constant (32) used by the dummy components.'''
        return 3_2
    @property
    def __magic_name__ ( self : str):
        '''Tied to ``self.time_input_dim``.'''
        return self.time_input_dim
    @property
    def __magic_name__ ( self : str):
        '''Four times ``self.time_input_dim``.'''
        return self.time_input_dim * 4
    @property
    def __magic_name__ ( self : List[Any]):
        '''Constant (100) used by the dummy components.'''
        return 1_0_0
    @property
    def __magic_name__ ( self : Tuple):
        '''Tiny random CLIP tokenizer fixture from the HF hub.'''
        snake_case__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        return tokenizer
    @property
    def __magic_name__ ( self : Optional[int]):
        '''Deterministically-seeded tiny CLIP text encoder with projection.'''
        torch.manual_seed(0)
        snake_case__ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModelWithProjection(UpperCamelCase__)
    @property
    def __magic_name__ ( self : Tuple):
        '''Deterministically-seeded tiny PriorTransformer with clip_std set to ones.'''
        torch.manual_seed(0)
        snake_case__ = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 1_2,
            """embedding_dim""": self.text_embedder_hidden_size,
            """num_layers""": 1,
        }
        snake_case__ = PriorTransformer(**UpperCamelCase__)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        snake_case__ = nn.Parameter(torch.ones(model.clip_std.shape))
        return model
    @property
    def __magic_name__ ( self : List[str]):
        '''Deterministically-seeded tiny CLIP vision encoder with projection.'''
        torch.manual_seed(0)
        snake_case__ = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
        snake_case__ = CLIPVisionModelWithProjection(UpperCamelCase__)
        return model
    @property
    def __magic_name__ ( self : Optional[Any]):
        '''CLIP image processor fixture (224x224, standard CLIP mean/std).'''
        snake_case__ = CLIPImageProcessor(
            crop_size=2_2_4 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=2_2_4 , )
        return image_processor
    def __magic_name__ ( self : Any):
        '''Assemble the component dict the prior pipeline is constructed from.'''
        snake_case__ = self.dummy_prior
        snake_case__ = self.dummy_image_encoder
        snake_case__ = self.dummy_text_encoder
        snake_case__ = self.dummy_tokenizer
        snake_case__ = self.dummy_image_processor
        snake_case__ = UnCLIPScheduler(
            variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1_0_0_0 , clip_sample=UpperCamelCase__ , clip_sample_range=10.0 , )
        snake_case__ = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """scheduler""": scheduler,
            """image_processor""": image_processor,
        }
        return components
    def __magic_name__ ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any]=0):
        '''Build deterministic pipeline call kwargs (seeded generator, 2 steps).'''
        if str(UpperCamelCase__).startswith("""mps"""):
            # MPS needs a CPU-side manual_seed generator.
            snake_case__ = torch.manual_seed(UpperCamelCase__)
        else:
            snake_case__ = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__)
        snake_case__ = {
            """prompt""": """horse""",
            """generator""": generator,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs
    def __magic_name__ ( self : Optional[Any]):
        '''Run the pipeline on CPU and compare a 10-value embedding slice against hard-coded expectations (both dict and tuple outputs).'''
        snake_case__ = """cpu"""
        snake_case__ = self.get_dummy_components()
        snake_case__ = self.pipeline_class(**UpperCamelCase__)
        snake_case__ = pipe.to(UpperCamelCase__)
        pipe.set_progress_bar_config(disable=UpperCamelCase__)
        snake_case__ = pipe(**self.get_dummy_inputs(UpperCamelCase__))
        snake_case__ = output.image_embeds
        snake_case__ = pipe(
            **self.get_dummy_inputs(UpperCamelCase__) , return_dict=UpperCamelCase__ , )[0]
        snake_case__ = image[0, -1_0:]
        snake_case__ = image_from_tuple[0, -1_0:]
        assert image.shape == (1, 3_2)
        snake_case__ = np.array(
            [-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
    @skip_mps
    def __magic_name__ ( self : List[Any]):
        '''Batch-vs-single-inference consistency check; skipped on MPS.'''
        snake_case__ = torch_device == """cpu"""
        snake_case__ = True
        snake_case__ = False
        self._test_inference_batch_single_identical(
            test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
    @skip_mps
    def __magic_name__ ( self : List[str]):
        '''Attention-slicing forward-pass equivalence check; skipped on MPS.'''
        snake_case__ = torch_device == """cpu"""
        snake_case__ = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
| 654 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
    """Configuration holder for the LayoutLM image-processor tests below.

    Stores dummy sizes/flags and exposes them as the kwargs dict used to build
    the image processor under test.

    NOTE(review): the ``__init__`` parameters are all named ``UpperCamelCase__``
    and the assignments bound to ``snake_case__`` while the bodies read
    ``size``, ``parent``, ``batch_size`` etc. — presumably a mangled rewrite of
    the original tester; confirm parameter order against the upstream test.
    """
    def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Dict=1_8 , UpperCamelCase__ : Any=3_0 , UpperCamelCase__ : List[Any]=4_0_0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=None , UpperCamelCase__ : Optional[int]=True , ):
        '''Record the dummy image-processing configuration on the instance.'''
        snake_case__ = size if size is not None else {"""height""": 1_8, """width""": 1_8}
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = num_channels
        snake_case__ = image_size
        snake_case__ = min_resolution
        snake_case__ = max_resolution
        snake_case__ = do_resize
        snake_case__ = size
        snake_case__ = apply_ocr
    def __magic_name__ ( self : Optional[Any]):
        '''Return the kwargs dict used to construct the image processor under test.'''
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( lowercase_ , unittest.TestCase ):
    """Test-suite for ``LayoutLMvaImageProcessor``.

    Checks processor properties, size handling from dict, batched/unbatched
    encoding of PIL / numpy / torch inputs, and an integration run against the
    DocVQA fixture dataset (comparing Tesseract OCR words and boxes).

    NOTE(review): locals are bound to ``snake_case__`` while later statements
    read ``image_processing``, ``image_inputs``, ``encoding``,
    ``encoded_images``, ``ds``, ``image``, ``expected_words`` /
    ``expected_boxes`` — presumably a mangled rewrite of the upstream test.
    """
    _lowercase : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def __magic_name__ ( self : Optional[int]):
        '''Create the shared tester/configuration object.'''
        snake_case__ = LayoutLMvaImageProcessingTester(self)
    @property
    def __magic_name__ ( self : Tuple):
        '''Kwargs dict used to construct the processor under test.'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def __magic_name__ ( self : List[Any]):
        '''The processor must expose do_resize, size and apply_ocr.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(UpperCamelCase__ , """do_resize"""))
        self.assertTrue(hasattr(UpperCamelCase__ , """size"""))
        self.assertTrue(hasattr(UpperCamelCase__ , """apply_ocr"""))
    def __magic_name__ ( self : Optional[int]):
        '''from_dict honours the configured size and a size override.'''
        snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8})
        snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
        self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2})
    def __magic_name__ ( self : List[str]):
        '''Intentionally empty (placeholder inherited test slot).'''
        pass
    def __magic_name__ ( self : List[str]):
        '''Batched and unbatched encoding of random PIL images yields the expected pixel_values shapes plus OCR words/boxes.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , Image.Image)
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""")
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        self.assertIsInstance(encoding.words , UpperCamelCase__)
        self.assertIsInstance(encoding.boxes , UpperCamelCase__)
        # Test batched
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def __magic_name__ ( self : List[Any]):
        '''Batched and unbatched encoding of random numpy arrays yields the expected pixel_values shapes.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , np.ndarray)
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def __magic_name__ ( self : Dict):
        '''Batched and unbatched encoding of random torch tensors yields the expected pixel_values shapes.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , torch.Tensor)
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def __magic_name__ ( self : Any):
        '''Integration test: run the default processor on a DocVQA fixture image and compare OCR words/boxes against Tesseract 4.1.1 reference output, then re-run with apply_ocr disabled.'''
        snake_case__ = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        snake_case__ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""")
        snake_case__ = Image.open(ds[0]["""file"""]).convert("""RGB""")
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""")
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
        self.assertEqual(len(encoding.words) , len(encoding.boxes))
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        snake_case__ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", 
        """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]]  # noqa: E231
        snake_case__ = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], 
        [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 
        8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]]  # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , UpperCamelCase__)
        self.assertListEqual(encoding.boxes , UpperCamelCase__)
        # with apply_OCR = False
        snake_case__ = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__)
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""")
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
| 654 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _lowerCAmelCase ( lowercase_ ):
    """Model tester for the SqueezeBERT test-suite.

    Builds a tiny SqueezeBertConfig plus random input tensors, and provides one
    create-and-check helper per task head (base model, masked LM, QA, sequence/
    token classification, multiple choice).

    NOTE(review): the ``__init__`` parameters are all named ``UpperCamelCase__``
    and assignments are bound to ``snake_case__`` while method bodies read
    ``self.parent``, ``self.batch_size`` etc. — presumably a mangled rewrite of
    the upstream transformers tester; confirm parameter order against it.
    """
    def __init__( self : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any]=1_3 , UpperCamelCase__ : Union[str, Any]=7 , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Any=True , UpperCamelCase__ : Optional[Any]=9_9 , UpperCamelCase__ : Union[str, Any]=3_2 , UpperCamelCase__ : str=5 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : List[Any]=6_4 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[int]=5_1_2 , UpperCamelCase__ : int=1_6 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : str=None , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : int=2 , UpperCamelCase__ : str=2 , UpperCamelCase__ : str=4 , UpperCamelCase__ : Optional[int]=1 , ):
        '''Record the dummy model configuration on the instance.'''
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = seq_length
        snake_case__ = is_training
        snake_case__ = use_input_mask
        snake_case__ = use_token_type_ids
        snake_case__ = use_labels
        snake_case__ = vocab_size
        snake_case__ = hidden_size
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = intermediate_size
        snake_case__ = hidden_act
        snake_case__ = hidden_dropout_prob
        snake_case__ = attention_probs_dropout_prob
        snake_case__ = max_position_embeddings
        snake_case__ = type_vocab_size
        snake_case__ = type_sequence_label_size
        snake_case__ = initializer_range
        snake_case__ = num_labels
        snake_case__ = num_choices
        snake_case__ = scope
        # SqueezeBERT-specific grouped-convolution settings.
        snake_case__ = q_groups
        snake_case__ = k_groups
        snake_case__ = v_groups
        snake_case__ = post_attention_groups
        snake_case__ = intermediate_groups
        snake_case__ = output_groups
    def __magic_name__ ( self : Tuple):
        '''Build random ids/mask/label tensors plus the config for one forward pass.'''
        snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        snake_case__ = None
        if self.use_input_mask:
            snake_case__ = random_attention_mask([self.batch_size, self.seq_length])
        snake_case__ = None
        snake_case__ = None
        snake_case__ = None
        if self.use_labels:
            snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            snake_case__ = ids_tensor([self.batch_size] , self.num_choices)
        snake_case__ = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __magic_name__ ( self : List[str]):
        '''Construct the tiny SqueezeBertConfig used by all checks.'''
        return SqueezeBertConfig(
            embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : List[Any]):
        '''Check the base model's last_hidden_state shape.'''
        snake_case__ = SqueezeBertModel(config=UpperCamelCase__)
        model.to(UpperCamelCase__)
        model.eval()
        snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = model(UpperCamelCase__)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def __magic_name__ ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple):
        '''Check the masked-LM head's logits shape.'''
        snake_case__ = SqueezeBertForMaskedLM(config=UpperCamelCase__)
        model.to(UpperCamelCase__)
        model.eval()
        snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def __magic_name__ ( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any):
        '''Check the QA head's start/end logits shapes.'''
        snake_case__ = SqueezeBertForQuestionAnswering(config=UpperCamelCase__)
        model.to(UpperCamelCase__)
        model.eval()
        snake_case__ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def __magic_name__ ( self : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : int):
        '''Check the sequence-classification head's logits shape.'''
        snake_case__ = self.num_labels
        snake_case__ = SqueezeBertForSequenceClassification(UpperCamelCase__)
        model.to(UpperCamelCase__)
        model.eval()
        snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def __magic_name__ ( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]):
        '''Check the token-classification head's logits shape.'''
        snake_case__ = self.num_labels
        snake_case__ = SqueezeBertForTokenClassification(config=UpperCamelCase__)
        model.to(UpperCamelCase__)
        model.eval()
        snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def __magic_name__ ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]):
        '''Check the multiple-choice head: inputs are tiled across the choice dim.'''
        snake_case__ = self.num_choices
        snake_case__ = SqueezeBertForMultipleChoice(config=UpperCamelCase__)
        model.to(UpperCamelCase__)
        model.eval()
        # Expand (batch, seq) -> (batch, num_choices, seq) for the MC head.
        snake_case__ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        snake_case__ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        snake_case__ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def __magic_name__ ( self : List[str]):
        '''Split prepare_config_and_inputs() into (config, inputs_dict) for the common tests.'''
        snake_case__ = self.prepare_config_and_inputs()
        ((snake_case__) , (snake_case__) , (snake_case__) , (snake_case__) , (snake_case__) , (snake_case__)) = config_and_inputs
        snake_case__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """SqueezeBERT model test suite.

    NOTE(review): attribute and test names reconstructed — the mangled original
    declared every class attribute as `_lowercase` and every method as
    `__magic_name__`, so later definitions silently shadowed earlier ones and
    `self.model_tester` / `self.config_tester` were never set.
    """

    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        # NOTE(review): config_class argument was mangled away; SqueezeBertConfig
        # is the conventional value here — confirm against the upstream test file.
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_squeezebert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_squeezebert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_squeezebert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_squeezebert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_squeezebert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    """Integration test: run the public squeezebert-mnli checkpoint on a fixed input."""

    @slow
    def test_inference_classification_head(self):
        # NOTE(review): test name reconstructed — the original was mangled to __magic_name__.
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 654 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _lowerCAmelCase ( lowercase_ ):
    """Torch Dataset over pre-tokenized sequences (distillation data pipeline).

    Wraps an array of token-id sequences plus their lengths, and cleans the data
    on construction: splits over-long sequences, drops very short ones, and drops
    ones dominated by unknown tokens.

    NOTE(review): identifiers reconstructed from self-references in this mangled
    file (`self.token_ids`, `self.lengths`, the method calls in __init__) — the
    original discarded every assignment into a throwaway local.
    """

    def __init__(self, params, data):
        # params: namespace providing max_model_input_size, mlm, special_tok_ids, is_master
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Consistency check: one length per sequence, each matching its sequence."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than max_model_input_size into bounded chunks,
        re-adding the special start/end tokens on every chunk."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            # Chop l into consecutive slices of at most n items.
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room for the re-inserted cls/sep tokens per chunk
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Drop sequences where at least 50% of the tokens are the unk token."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Log dataset statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate a batch of (token_ids, length) pairs into padded tensors.

        Returns:
            tk_t: tensor of shape (bs, max_seq_len_), lg_t: tensor of shape (bs,).
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 654 | 1 |
from math import sqrt
def _UpperCAmelCase ( a : int = 100_0000 ):
snake_case__ = 0
snake_case__ = 0
snake_case__ = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(a , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F'''{solution() = }''')
| 654 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def _UpperCAmelCase ( a : str ):
if "model" in orig_key:
snake_case__ = orig_key.replace("""model.""" , """""" )
if "norm1" in orig_key:
snake_case__ = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
if "norm2" in orig_key:
snake_case__ = orig_key.replace("""norm2""" , """output.LayerNorm""" )
if "norm" in orig_key:
snake_case__ = orig_key.replace("""norm""" , """LayerNorm""" )
if "transformer" in orig_key:
snake_case__ = orig_key.split(""".""" )[0].split("""_""" )[-1]
snake_case__ = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' )
if "mha.attn" in orig_key:
snake_case__ = orig_key.replace("""mha.attn""" , """attention.self""" )
if "mha" in orig_key:
snake_case__ = orig_key.replace("""mha""" , """attention""" )
if "W_q" in orig_key:
snake_case__ = orig_key.replace("""W_q""" , """self.query""" )
if "W_k" in orig_key:
snake_case__ = orig_key.replace("""W_k""" , """self.key""" )
if "W_v" in orig_key:
snake_case__ = orig_key.replace("""W_v""" , """self.value""" )
if "ff1" in orig_key:
snake_case__ = orig_key.replace("""ff1""" , """intermediate.dense""" )
if "ff2" in orig_key:
snake_case__ = orig_key.replace("""ff2""" , """output.dense""" )
if "ff" in orig_key:
snake_case__ = orig_key.replace("""ff""" , """output.dense""" )
if "mlm_class" in orig_key:
snake_case__ = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
if "mlm" in orig_key:
snake_case__ = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
if "cls" not in orig_key:
snake_case__ = """yoso.""" + orig_key
return orig_key
def _UpperCAmelCase ( a : Tuple , a : Dict ):
for key in orig_state_dict.copy().keys():
snake_case__ = orig_state_dict.pop(a )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
snake_case__ = val
snake_case__ = orig_state_dict["""cls.predictions.decoder.bias"""]
snake_case__ = torch.arange(a ).expand((1, -1) ) + 2
return orig_state_dict
def _UpperCAmelCase(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load a raw YOSO checkpoint, convert its state dict, and save an HF model.

    The mangled original discarded every intermediate (state dict, config,
    model) into throwaway locals, so the conversion could never run.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    # NOTE(review): `convert_checkpoint_helper` must be bound at module level.
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")


# The __main__ block below refers to this entry point by this name.
convert_yoso_checkpoint = _UpperCAmelCase
if __name__ == "__main__":
    # The mangled original assigned the parser and parsed args to `a__` but then
    # read `parser` and `args`, so the script crashed with NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for YOSO model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 654 | 1 |
from collections.abc import Iterable
from typing import Any
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] , UpperCamelCase__ : int | None = None):
'''simple docstring'''
snake_case__ = value
snake_case__ = None # Added in order to delete a node easier
snake_case__ = None
snake_case__ = None
def __repr__( self : List[str]):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value)
return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1)
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : int , UpperCamelCase__ : Node | None = None):
'''simple docstring'''
snake_case__ = root
def __str__( self : Any):
'''simple docstring'''
return str(self.root)
def __magic_name__ ( self : str , UpperCamelCase__ : Node , UpperCamelCase__ : Node | None):
'''simple docstring'''
if new_children is not None: # reset its kids
snake_case__ = node.parent
if node.parent is not None: # reset its parent
if self.is_right(UpperCamelCase__): # If it is the right children
snake_case__ = new_children
else:
snake_case__ = new_children
else:
snake_case__ = new_children
def __magic_name__ ( self : Tuple , UpperCamelCase__ : Node):
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __magic_name__ ( self : Tuple):
'''simple docstring'''
return self.root is None
def __magic_name__ ( self : int , UpperCamelCase__ : Union[str, Any]):
'''simple docstring'''
snake_case__ = Node(UpperCamelCase__) # create a new Node
if self.empty(): # if Tree is empty
snake_case__ = new_node # set its root
else: # Tree is not empty
snake_case__ = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
snake_case__ = new_node # We insert the new node in a leaf
break
else:
snake_case__ = parent_node.left
else:
if parent_node.right is None:
snake_case__ = new_node
break
else:
snake_case__ = parent_node.right
snake_case__ = parent_node
def __magic_name__ ( self : Optional[int] , *UpperCamelCase__ : Tuple):
'''simple docstring'''
for value in values:
self.__insert(UpperCamelCase__)
def __magic_name__ ( self : Optional[Any] , UpperCamelCase__ : str):
'''simple docstring'''
if self.empty():
raise IndexError("""Warning: Tree is empty! please use another.""")
else:
snake_case__ = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
snake_case__ = node.left if value < node.value else node.right
return node
def __magic_name__ ( self : Optional[Any] , UpperCamelCase__ : Node | None = None):
'''simple docstring'''
if node is None:
if self.root is None:
return None
snake_case__ = self.root
if not self.empty():
while node.right is not None:
snake_case__ = node.right
return node
def __magic_name__ ( self : int , UpperCamelCase__ : Node | None = None):
'''simple docstring'''
if node is None:
snake_case__ = self.root
if self.root is None:
return None
if not self.empty():
snake_case__ = self.root
while node.left is not None:
snake_case__ = node.left
return node
def __magic_name__ ( self : Optional[Any] , UpperCamelCase__ : int):
'''simple docstring'''
snake_case__ = self.search(UpperCamelCase__) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(UpperCamelCase__ , UpperCamelCase__)
elif node.left is None: # Has only right children
self.__reassign_nodes(UpperCamelCase__ , node.right)
elif node.right is None: # Has only left children
self.__reassign_nodes(UpperCamelCase__ , node.left)
else:
snake_case__ = self.get_max(
node.left) # Gets the max value of the left branch
self.remove(tmp_node.value) # type: ignore
snake_case__ = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __magic_name__ ( self : Any , UpperCamelCase__ : Node | None):
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left)
yield from self.preorder_traverse(node.right)
def __magic_name__ ( self : str , UpperCamelCase__ : Tuple=None):
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root)
else:
return traversal_function(self.root)
def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : list , UpperCamelCase__ : Node | None):
'''simple docstring'''
if node:
self.inorder(UpperCamelCase__ , node.left)
arr.append(node.value)
self.inorder(UpperCamelCase__ , node.right)
def __magic_name__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Node):
'''simple docstring'''
snake_case__ = []
self.inorder(UpperCamelCase__ , UpperCamelCase__) # append all values to list using inorder traversal
return arr[k - 1]
def _UpperCAmelCase ( a : Node | None ):
snake_case__ = []
if curr_node is not None:
snake_case__ = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _UpperCAmelCase():
    """Console demo of the binary search tree: insert, print, search, min/max, remove.

    The mangled original inserted/removed an undefined `a` instead of the loop
    variable and printed `a` instead of the tree.
    NOTE(review): requires `BinarySearchTree` to be bound at module level.
    """
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
    print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 654 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( lowercase_ ):
    """Read-only fsspec filesystem exposing one compressed file as a single
    uncompressed file.

    NOTE(review): attribute/parameter names reconstructed — the mangled original
    declared duplicate parameters (a SyntaxError) and never set `self.file` /
    `self.compressed_name` / `self.uncompressed_name` / `self.dir_cache`, all of
    which the methods below read.
    """

    root_marker = ""
    # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    protocol: str = None
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        """Populate the one-entry directory cache with the uncompressed file's info."""
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        """Return the whole decompressed content (the archive holds one file)."""
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class _lowerCAmelCase ( lowercase_ ):
    """Filesystem for a bzip2-compressed file.

    NOTE(review): the mangled original assigned all three values to `_lowercase`,
    so only the last survived; names restored from the base-class attribute
    comments (protocol / compression / extension).
    """

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
class _lowerCAmelCase ( lowercase_ ):
    """Filesystem for a gzip-compressed file (see base-class attribute comments;
    the mangled original shadowed all three values under one `_lowercase` name)."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
class _lowerCAmelCase ( lowercase_ ):
    """Filesystem for an lz4-compressed file (see base-class attribute comments;
    the mangled original shadowed all three values under one `_lowercase` name)."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
class _lowerCAmelCase ( lowercase_ ):
    """Filesystem for an xz-compressed file (see base-class attribute comments;
    the mangled original shadowed all three values under one `_lowercase` name)."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class _lowerCAmelCase ( lowercase_ ):
    """Filesystem for a zstandard-compressed file.

    NOTE(review): names reconstructed — the mangled original had duplicate
    constructor parameters (a SyntaxError) and discarded `_enter` / the
    `__enter__` patch into throwaway locals.
    """

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """Delegating wrapper whose `close` attribute is writable."""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
| 654 | 1 |
def _UpperCAmelCase ( a : float , a : float ):
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'''{price_plus_tax(1_0_0, 0.25) = }''')
print(F'''{price_plus_tax(1_25.50, 0.05) = }''')
| 654 |
def _UpperCAmelCase ( a : int ):
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 | 1 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def _UpperCAmelCase ( a : Optional[int] , a : Dict , a : Any=1e-12 ):
snake_case__ = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(a , axis=1 ) , a_min=a ) ).T
snake_case__ = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(a , axis=1 ) , a_min=a ) ).T
return jnp.matmul(a , norm_emb_a.T )
class _lowerCAmelCase ( nn.Module ):
    """Flax module scoring CLIP image embeddings against NSFW "concept" embeddings.

    NOTE(review): names reconstructed — the mangled original discarded every
    `setup` assignment into throwaway locals although `__call__` reads
    `self.vision_model`, `self.visual_projection`, the concept embeddings and
    their weights. `setup` is the flax.linen lazy-init hook name.
    """

    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )
        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        # NOTE(review): `jax_cosine_distance` must be bound at module level.
        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


# The pretrained-model wrapper below refers to this module class by this name.
FlaxStableDiffusionSafetyCheckerModule = _lowerCAmelCase
class _lowerCAmelCase ( lowercase_ ):
    """FlaxPreTrainedModel wrapper around the safety-checker module.

    NOTE(review): class-attribute names (`config_class`, `main_input_name`,
    `module_class`) follow the FlaxPreTrainedModel convention — the mangled
    original declared all three as `_lowercase`, shadowing each other, and its
    constructor had duplicate parameter names (a SyntaxError).
    """

    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)  # NHWC CLIP input — confirm against the vision config
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        """Initialize module parameters from a random normal CLIP input."""
        clip_input = jax.random.normal(rng, input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        # Incoming images are NCHW; the flax module expects NHWC.
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 654 |
class _lowerCAmelCase :
    """Fenwick-style tree intended for point updates and range-max queries.

    NOTE(review): this class is machine-mangled — `snake_case__ =` lines discard
    their assignment targets, and method bodies reference names (`size`, `index`,
    `value`, `left`, `right`, `current_left_border`, `current_left`, `result`,
    `self.size`, `self.arr`, `self.tree`, `self.get_prev`, `self.get_next`) that
    do not match the declared parameters. The __init__ presumably set
    `self.size`, `self.arr`, `self.tree`, and the `__magic_name__` methods were
    presumably `get_next`, `get_prev`, `update`, `query` — confirm against the
    upstream source before relying on it; as written it raises NameError.
    """
    def __init__( self : List[Any] , UpperCamelCase__ : int):
        '''Store the tree size and allocate the value/tree arrays (zero-filled).'''
        snake_case__ = size
        snake_case__ = [0] * size
        snake_case__ = [0] * size
    @staticmethod
    def __magic_name__ ( UpperCamelCase__ : int):
        '''Next index whose tree slot covers this one: index | (index + 1).'''
        return index | (index + 1)
    @staticmethod
    def __magic_name__ ( UpperCamelCase__ : int):
        '''One past the left edge of the slot's covered range, minus one.'''
        return (index & (index + 1)) - 1
    def __magic_name__ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int):
        '''Point update: set arr[index] = value and refresh covering tree slots.'''
        snake_case__ = value
        while index < self.size:
            snake_case__ = self.get_prev(UpperCamelCase__) + 1
            if current_left_border == index:
                snake_case__ = value
            else:
                # NOTE(review): a three-argument max() — the intended operands are
                # not recoverable from this mangled text.
                snake_case__ = max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = self.get_next(UpperCamelCase__)
    def __magic_name__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int):
        '''Range-max query over [left, right) using tree slots where possible.'''
        right -= 1  # Because of right is exclusive
        snake_case__ = 0
        while left <= right:
            snake_case__ = self.get_prev(UpperCamelCase__)
            if left <= current_left:
                snake_case__ = max(UpperCamelCase__ , self.tree[right])
                snake_case__ = current_left
            else:
                snake_case__ = max(UpperCamelCase__ , self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 654 | 1 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def _UpperCAmelCase(monkeypatch):
    """Reset datasets' emitted-deprecation-warning registry before the test.

    The parameter must be named `monkeypatch` so pytest injects its builtin
    fixture; the mangled original named it `a` and read the undefined name.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())
@pytest.fixture
def _UpperCAmelCase(monkeypatch):
    """Patch `datasets.inspect.huggingface_hub` with a stub Hub client.

    The mangled original gave both inner classes the same name (so MetricMock
    was shadowed before use) and iterated an undefined `lowercase_`.
    """

    class MetricMock:
        def __init__(self, metric_id):
            self.metric_id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            # NOTE(review): method name assumed to match what datasets.inspect calls — confirm.
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())
@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def _UpperCAmelCase(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Each deprecated metric entry point should emit the evaluate-migration FutureWarning.

    The mangled original declared all five parameters as `a` (a SyntaxError) and
    passed a parameter instead of a warning class to pytest.warns.
    NOTE(review): the two mock fixtures are requested by name; the fixture
    functions above must actually be exposed as `mock_emitted_deprecation_warnings`
    and `mock_hfh` for pytest to resolve them.
    """
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 654 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _lowerCAmelCase :
    """Builds a tiny random Pegasus config plus matching model inputs for the TF tests below.

    NOTE(review): an automated rename collapsed every assignment target into
    ``snake_case__``, so locals such as ``input_ids``/``config`` referenced later in
    each method are currently undefined at runtime — compare with the upstream
    ``TFPegasusModelTester`` before relying on this class.
    """

    # Config class built by the tester (presumably bound as ``config_cls``).
    _lowercase : List[str] = PegasusConfig
    # Extra keyword overrides applied on top of the defaults (``config_updates``).
    _lowercase : Union[str, Any] = {}
    # Activation function used by the tiny model.
    _lowercase : Tuple = '''gelu'''

    def __init__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int]=1_3 , UpperCamelCase__ : Any=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : int=9_9 , UpperCamelCase__ : Dict=3_2 , UpperCamelCase__ : str=2 , UpperCamelCase__ : int=4 , UpperCamelCase__ : Tuple=3_7 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : str=4_0 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Dict=0 , ):
        '''Store the (tiny) hyper-parameters used to build configs and random inputs.'''
        # Each line below presumably targeted a distinct ``self.<name>`` attribute
        # (parent, batch_size, seq_length, ...) before the rewrite — TODO confirm.
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = seq_length
        snake_case__ = is_training
        snake_case__ = use_labels
        snake_case__ = vocab_size
        snake_case__ = hidden_size
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = intermediate_size
        snake_case__ = hidden_dropout_prob
        snake_case__ = attention_probs_dropout_prob
        snake_case__ = max_position_embeddings
        snake_case__ = eos_token_id
        snake_case__ = pad_token_id
        snake_case__ = bos_token_id

    def __magic_name__ ( self : Optional[Any]):
        '''Create a random (config, inputs_dict) pair; every encoder sequence ends with EOS.'''
        snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
        # Force EOS as the final token of every row.
        snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
        snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1)
        snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        snake_case__ = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        snake_case__ = prepare_pegasus_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        return config, inputs_dict

    def __magic_name__ ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]):
        '''Assert that decoding with cached ``past_key_values`` matches a full forward pass.'''
        snake_case__ = TFPegasusModel(config=UpperCamelCase__).get_decoder()
        snake_case__ = inputs_dict["""input_ids"""]
        # Only keep the first batch row to keep the check cheap.
        snake_case__ = input_ids[:1, :]
        snake_case__ = inputs_dict["""attention_mask"""][:1, :]
        snake_case__ = inputs_dict["""head_mask"""]
        snake_case__ = 1
        # first forward pass
        snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , use_cache=UpperCamelCase__)
        snake_case__ , snake_case__ = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size)
        snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
        # append to next input_ids and
        snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1)
        snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1)
        # Run once without cache on the full sequence, once with cache on the suffix.
        snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__)[0]
        snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__)[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
        # select random slice
        snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1]))
        snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
        snake_case__ = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3)
def _UpperCAmelCase ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """Assemble the standard Pegasus model-input dict, deriving any mask not supplied.

    FIX: the original signature declared every parameter as ``a`` (duplicate
    argument names — a SyntaxError) while the body already referenced the names
    used here; restoring the real names makes the helper callable again.

    Returns a dict with input ids, decoder ids and the four attention/head masks.
    """
    if attention_mask is None:
        # Attend to every non-pad token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        # Always attend to the first decoder token, then mask pads in the rest.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        # Keep every encoder attention head active.
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """Standard TF test harness for Pegasus: common model tests plus config sanity checks.

    NOTE(review): ``snake_case__`` assignments below lost their original targets
    (presumably ``self.model_tester`` / ``self.config_tester``) — confirm upstream.
    """

    # All TF model classes exercised by the common test suite.
    _lowercase : int = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    # Generative head(s) used by the generation tests.
    _lowercase : List[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    # Task -> model mapping consumed by the pipeline test mixin.
    _lowercase : List[Any] = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    # Boolean switches read by the common test mixin; original names were lost in
    # the rename (likely is_encoder_decoder / test_pruning / test_onnx) — TODO confirm.
    _lowercase : Optional[int] = True
    _lowercase : Dict = False
    _lowercase : Any = False

    def __magic_name__ ( self : str):
        '''Instantiate the model tester and the PegasusConfig tester.'''
        snake_case__ = TFPegasusModelTester(self)
        snake_case__ = ConfigTester(self , config_class=UpperCamelCase__)

    def __magic_name__ ( self : List[Any]):
        '''Run the shared configuration sanity checks.'''
        self.config_tester.run_common_tests()

    def __magic_name__ ( self : Optional[int]):
        '''Verify cached decoding matches full decoding on a fresh config/input pair.'''
        snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__)
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test: batch-summarise two articles with ``google/pegasus-xsum``
    and compare the decoded output against pinned expected summaries."""

    # Source articles fed to the model.
    _lowercase : List[str] = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    # Expected decoded summaries, one per source article.
    _lowercase : str = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    # Checkpoint under test.
    _lowercase : int = '''google/pegasus-xsum'''

    @cached_property
    def __magic_name__ ( self : Dict):
        '''Tokenizer for the checkpoint (downloaded once, cached on the instance).'''
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def __magic_name__ ( self : int):
        '''Model under test (downloaded once, cached on the instance).'''
        snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def __magic_name__ ( self : Dict , **UpperCamelCase__ : List[Any]):
        '''Generate summaries for the batch and assert they equal the pinned text.'''
        snake_case__ = self.translate_src_text(**UpperCamelCase__)
        assert self.expected_text == generated_words

    def __magic_name__ ( self : str , **UpperCamelCase__ : List[Any]):
        '''Tokenize the sources, beam-search generate (num_beams=2), and decode.'''
        snake_case__ = self.tokenizer(self.src_text , **UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""tf""")
        snake_case__ = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase__ , )
        snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase__)
        return generated_words

    @slow
    def __magic_name__ ( self : List[str]):
        '''Run the batch-generation equality check (slow: downloads the real model).'''
        self._assert_generated_batch_equal_expected()
| 654 | 1 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class _lowerCAmelCase ( lowercase_ ):
    """Dataset input stream that materialises a SQL query/table into a ``Dataset``.

    NOTE(review): the builder created in ``__init__`` is later used as
    ``self.builder`` even though the mangled assignment targets ``snake_case__`` —
    confirm against the upstream ``SqlDatasetReader``.
    """

    def __init__( self : Any , UpperCamelCase__ : Union[str, "sqlalchemy.sql.Selectable"] , UpperCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , UpperCamelCase__ : Optional[Features] = None , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = False , **UpperCamelCase__ : Any , ):
        '''Wire the packaged ``Sql`` builder with the given query/connection and cache options.'''
        super().__init__(features=UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ , **UpperCamelCase__)
        snake_case__ = Sql(
            cache_dir=UpperCamelCase__ , features=UpperCamelCase__ , sql=UpperCamelCase__ , con=UpperCamelCase__ , **UpperCamelCase__ , )

    def __magic_name__ ( self : int):
        '''Prepare the underlying builder and return the whole result as a "train" split Dataset.'''
        # None placeholders: this reader does not customise download behaviour
        # (download_config / download_mode / verification_mode / base_path).
        snake_case__ = None
        snake_case__ = None
        snake_case__ = None
        snake_case__ = None
        self.builder.download_and_prepare(
            download_config=UpperCamelCase__ , download_mode=UpperCamelCase__ , verification_mode=UpperCamelCase__ , base_path=UpperCamelCase__ , )
        # Build dataset for splits
        snake_case__ = self.builder.as_dataset(
            split="""train""" , verification_mode=UpperCamelCase__ , in_memory=self.keep_in_memory)
        return dataset
class _lowerCAmelCase :
    """Writer that streams a ``Dataset`` into a SQL table, optionally with multiprocessing.

    NOTE(review): ``snake_case__`` assignments lost their original targets
    (presumably ``self.dataset``/``self.name``/``self.con``/... given the reads
    below) — confirm against the upstream ``SqlDatasetWriter``.
    """

    def __init__( self : Union[str, Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , **UpperCamelCase__ : Union[str, Any] , ):
        '''Validate ``num_proc`` and store dataset, table name, connection and batching options.'''
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''')
        snake_case__ = dataset
        snake_case__ = name
        snake_case__ = con
        # Fall back to the library-wide default batch size when none is given.
        snake_case__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        snake_case__ = num_proc
        snake_case__ = to_sql_kwargs

    def __magic_name__ ( self : List[str]):
        '''Strip reader-only kwargs (sql/con/index) and delegate to ``_write``; returns rows written.'''
        snake_case__ = self.to_sql_kwargs.pop("""sql""" , UpperCamelCase__)
        snake_case__ = self.to_sql_kwargs.pop("""con""" , UpperCamelCase__)
        snake_case__ = self.to_sql_kwargs.pop("""index""" , UpperCamelCase__)
        snake_case__ = self._write(index=UpperCamelCase__ , **self.to_sql_kwargs)
        return written

    def __magic_name__ ( self : int , UpperCamelCase__ : List[str]):
        '''Write one batch (args = (offset, index, to_sql_kwargs)) via pandas ``DataFrame.to_sql``.'''
        snake_case__ , snake_case__ , snake_case__ = args
        # After the first batch, append instead of replacing the table.
        snake_case__ = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
        snake_case__ = query_table(
            table=self.dataset.data , key=slice(UpperCamelCase__ , offset + self.batch_size) , indices=self.dataset._indices , )
        snake_case__ = batch.to_pandas()
        snake_case__ = df.to_sql(self.name , self.con , index=UpperCamelCase__ , **UpperCamelCase__)
        return num_rows or len(UpperCamelCase__)

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[str]):
        '''Write the whole dataset batch by batch, serially or with a process pool.'''
        snake_case__ = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset) , self.batch_size) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            snake_case__ , snake_case__ = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , UpperCamelCase__ , UpperCamelCase__)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
                    written += num_rows
        return written
| 654 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
# Module logger (original name lost in the automated rename; the class below
# calls it as ``logger`` — TODO confirm).
a__ = logging.get_logger(__name__)
# On-disk/Hub file names for the three vocabularies (presumably VOCAB_FILES_NAMES).
a__ = {
    """artists_file""": """artists.json""",
    """lyrics_file""": """lyrics.json""",
    """genres_file""": """genres.json""",
}
# Hub URL for each vocabulary file of the ``jukebox`` checkpoint.
a__ = {
    """artists_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
    },
    """genres_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
    },
    """lyrics_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
    },
}
# Maximum number of lyric tokens per checkpoint.
a__ = {
    """jukebox""": 5_1_2,
}
class _lowerCAmelCase ( lowercase_ ):
    """Tokenizer that turns an (artist, genres, lyrics) triple into id lists for Jukebox.

    Artists and genres are looked up in small dicts; lyrics are tokenized
    character-by-character against a character vocabulary.

    NOTE(review): the automated rename collapsed assignment targets into
    ``snake_case__`` and the class-level constants below reference names
    (``VOCAB_FILES_NAMES`` etc.) that this file now defines as ``a__`` — compare
    with the upstream ``JukeboxTokenizer`` before relying on details.
    """

    # File names / Hub URLs / max lyric lengths for pretrained checkpoints.
    _lowercase : str = VOCAB_FILES_NAMES
    _lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : str = PRETRAINED_LYRIC_TOKENS_SIZES
    # Names of the tensors returned by ``__call__``.
    _lowercase : Any = ['''input_ids''', '''attention_mask''']

    def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=["v3", "v2", "v2"] , UpperCamelCase__ : List[str]=5_1_2 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : List[Any]="<|endoftext|>" , **UpperCamelCase__ : List[Any] , ):
        '''Load the three JSON vocabularies and build encoder/decoder maps per prior version.'''
        # Wrap a plain-string unk token so it survives tokenizer serialization.
        snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else unk_token
        super().__init__(
            unk_token=UpperCamelCase__ , n_genres=UpperCamelCase__ , version=UpperCamelCase__ , max_n_lyric_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
        snake_case__ = version
        snake_case__ = max_n_lyric_tokens
        snake_case__ = n_genres
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        # Regex matching out-of-vocabulary characters in lyrics.
        snake_case__ = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 7_9:
            snake_case__ = oov.replace(R"""\-'""" , R"""\-+'""")
        snake_case__ = regex.compile(UpperCamelCase__)
        # Inverse maps for decoding ids back to strings.
        snake_case__ = {v: k for k, v in self.artists_encoder.items()}
        snake_case__ = {v: k for k, v in self.genres_encoder.items()}
        snake_case__ = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def __magic_name__ ( self : List[str]):
        '''Total vocabulary size: artists + genres + lyric characters.'''
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def __magic_name__ ( self : Union[str, Any]):
        '''Return the combined vocabulary.
        NOTE(review): ``dict()`` with three positional mapping arguments raises
        TypeError — presumably the three encoders were meant to be merged into one
        dict (or returned as a dict of dicts); confirm upstream.'''
        return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder)

    def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int):
        '''Map artist names, genre lists and lyric characters to ids (0 for unknown);
        genre lists are right-padded with -1 up to ``n_genres``.'''
        snake_case__ = [self.artists_encoder.get(UpperCamelCase__ , 0) for artist in list_artists]
        for genres in range(len(UpperCamelCase__)):
            snake_case__ = [self.genres_encoder.get(UpperCamelCase__ , 0) for genre in list_genres[genres]]
            snake_case__ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        # Only the first prior conditions on lyrics; the other two get empty lists.
        snake_case__ = [[self.lyrics_encoder.get(UpperCamelCase__ , 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Optional[int]):
        '''Split lyrics into individual characters.'''
        return list(UpperCamelCase__)

    def __magic_name__ ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , **UpperCamelCase__ : List[str]):
        '''Normalize the triple, then character-tokenize the lyrics.'''
        snake_case__ , snake_case__ , snake_case__ = self.prepare_for_tokenization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = self._tokenize(UpperCamelCase__)
        return artist, genre, lyrics

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool = False):
        '''Normalize artist/genre names per prior version and strip OOV characters from lyrics.'''
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                # v3 keeps raw lowercase names.
                snake_case__ = artists[idx].lower()
                snake_case__ = [genres[idx].lower()]
            else:
                # v2 normalizes names and tags them with a ``.v2`` suffix.
                snake_case__ = self._normalize(artists[idx]) + """.v2"""
                snake_case__ = [
                    self._normalize(UpperCamelCase__) + """.v2""" for genre in genres[idx].split("""_""")
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""")
            # Fixed 79-character vocabulary used by the v2 top-level prior.
            snake_case__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
            snake_case__ = {vocab[index]: index + 1 for index in range(len(UpperCamelCase__))}
            snake_case__ = 0
            snake_case__ = len(UpperCamelCase__) + 1
            snake_case__ = self.vocab
            snake_case__ = {v: k for k, v in self.vocab.items()}
            snake_case__ = """"""
        else:
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""")
        snake_case__ = self._run_strip_accents(UpperCamelCase__)
        snake_case__ = lyrics.replace("""\\""" , """\n""")
        snake_case__ = self.out_of_vocab.sub("""""" , UpperCamelCase__), [], []
        return artists, genres, lyrics

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : str):
        '''Remove accents by NFD-decomposing and dropping combining marks (category Mn).'''
        snake_case__ = unicodedata.normalize("""NFD""" , UpperCamelCase__)
        snake_case__ = []
        for char in text:
            snake_case__ = unicodedata.category(UpperCamelCase__)
            if cat == "Mn":
                continue
            output.append(UpperCamelCase__)
        return "".join(UpperCamelCase__)

    def __magic_name__ ( self : List[str] , UpperCamelCase__ : str):
        '''Lowercase, replace non [a-z0-9.] characters with "_" and collapse runs of "_".'''
        snake_case__ = (
            [chr(UpperCamelCase__) for i in range(ord("""a""") , ord("""z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""A""") , ord("""Z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""0""") , ord("""9""") + 1)]
            + ["""."""]
        )
        snake_case__ = frozenset(UpperCamelCase__)
        snake_case__ = re.compile(R"""_+""")
        snake_case__ = """""".join([c if c in accepted else """_""" for c in text.lower()])
        snake_case__ = pattern.sub("""_""" , UpperCamelCase__).strip("""_""")
        return text

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : List[str]):
        '''Join lyric tokens back into a space-separated string.'''
        return " ".join(UpperCamelCase__)

    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : bool = False):
        '''Convert nested python lists to tensors of the requested framework
        (TF / PyTorch / JAX / NumPy), optionally prepending a batch axis.'''
        if not isinstance(UpperCamelCase__ , UpperCamelCase__):
            snake_case__ = TensorType(UpperCamelCase__)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""")
            import tensorflow as tf

            snake_case__ = tf.constant
            snake_case__ = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""")
            import torch

            snake_case__ = torch.tensor
            snake_case__ = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""")
            import jax.numpy as jnp  # noqa: F811

            snake_case__ = jnp.array
            snake_case__ = _is_jax
        else:
            snake_case__ = np.asarray
            snake_case__ = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                snake_case__ = [inputs]
            if not is_tensor(UpperCamelCase__):
                snake_case__ = as_tensor(UpperCamelCase__)
        except:  # noqa E722
            raise ValueError(
                """Unable to create tensor, you should probably activate truncation and/or padding """
                """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""")
        return inputs

    def __call__( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any="" , UpperCamelCase__ : Dict="pt"):
        '''Encode (artist, genres, lyrics) into one id tensor per prior plus attention masks.'''
        snake_case__ = [0, 0, 0]
        # The same artist/genres strings are fed to each of the three priors.
        snake_case__ = [artist] * len(self.version)
        snake_case__ = [genres] * len(self.version)
        snake_case__ , snake_case__ , snake_case__ = self.tokenize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ , snake_case__ , snake_case__ = self._convert_token_to_id(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = [-INFINITY] * len(full_tokens[-1])
        snake_case__ = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCamelCase__)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks})

    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None):
        '''Serialize the three encoder dicts to JSON files in ``save_directory``;
        returns the three file paths.'''
        if not os.path.isdir(UpperCamelCase__):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCamelCase__))
        return (artists_file, genres_file, lyrics_file)

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]):
        '''Decode artist/genre/lyric ids back to strings via the inverse maps.'''
        snake_case__ = self.artists_decoder.get(UpperCamelCase__)
        snake_case__ = [self.genres_decoder.get(UpperCamelCase__) for genre in genres_index]
        snake_case__ = [self.lyrics_decoder.get(UpperCamelCase__) for character in lyric_index]
        return artist, genres, lyrics
| 654 | 1 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
# NOTE(review): the rename collapsed this constant's name to ``a__`` although the
# functions below read it as ``c`` — confirm against the original file.
a__ = 2_9_9_7_9_2_4_5_8
# Symbols
# Sympy symbols for a symbolic four-vector (ct, x, y, z).
a__ , a__ , a__ , a__ = symbols("""ct x y z""")
def _UpperCAmelCase ( velocity : float ):
    """Return beta = v / c for a velocity in m/s.

    FIX: the parameter was declared as ``a`` while the body referenced
    ``velocity``, so every call raised NameError; the parameter is renamed to
    match the body.

    Raises:
        ValueError: if the speed exceeds c, or is below 1 m/s.
    """
    # NOTE(review): ``c`` is read from module scope; in this file the constant was
    # renamed to ``a__`` by the automated rewrite — confirm it is rebound as ``c``.
    if velocity > c:
        raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("""Speed must be greater than or equal to 1!""" )
    return velocity / c
def _UpperCAmelCase ( a : float ):
    """Return the Lorentz factor gamma = 1 / sqrt(1 - beta^2) for the given velocity."""
    speed_ratio = beta(a )
    return 1 / sqrt(1 - speed_ratio ** 2 )
def _UpperCAmelCase ( a : float ):
    """Return the 4x4 Lorentz boost matrix along the x-axis for the given velocity."""
    lorentz_factor = gamma(a )
    off_diagonal = -(lorentz_factor * beta(a ))
    boost = [
        [lorentz_factor, off_diagonal, 0, 0],
        [off_diagonal, lorentz_factor, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1],
    ]
    return np.array(boost )
def _UpperCAmelCase ( velocity : float , event : np.ndarray | None = None ):
    """Apply the Lorentz boost for ``velocity`` to the four-vector ``event``.

    FIX: both parameters were declared as ``a`` (duplicate argument names — a
    SyntaxError) while the body referenced ``event``; restored distinct names.

    If ``event`` is None, a symbolic four-vector (ct, x, y, z) is used; otherwise
    ``event`` is MODIFIED IN PLACE (its time component is scaled by c) before the
    boost is applied.
    """
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Example of symbolic vector:
    # NOTE(review): ``transform`` / ``four_vector`` / ``numerical_vector`` were
    # renamed to ``_UpperCAmelCase`` / ``a__`` by the automated rewrite, so this
    # demo no longer resolves as written — confirm against the original script.
    a__ = transform(2_9_9_7_9_2_4_5)
    print("""Example of four vector: """)
    print(F'''ct\' = {four_vector[0]}''')
    print(F'''x\' = {four_vector[1]}''')
    print(F'''y\' = {four_vector[2]}''')
    print(F'''z\' = {four_vector[3]}''')
    # Substitute symbols with numerical values
    a__ = {ct: c, x: 1, y: 1, z: 1}
    a__ = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(F'''\n{numerical_vector}''')
| 654 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class _lowerCAmelCase :
    """Resize images so the shorter edge hits a randomly-sampled target size,
    capping the longer edge at ``max_size``.

    NOTE(review): ``snake_case__`` assignments lost their targets; the locals read
    below (``h``/``w``/``scale``/``newh``/``neww``/``img_augs``) are presumably
    the original names — confirm against the upstream ``ResizeShortestEdge``.
    """

    def __init__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str]=sys.maxsize):
        '''Store the interpolation mode, the long-edge cap and the short-edge sampling range.'''
        snake_case__ = """bilinear"""
        snake_case__ = max_size
        snake_case__ = short_edge_length

    def __call__( self : List[str] , UpperCamelCase__ : Tuple):
        '''Resize each image: uint8 arrays go through PIL, everything else through F.interpolate.'''
        snake_case__ = []
        for img in imgs:
            snake_case__ , snake_case__ = img.shape[:2]
            # later: provide list and randomly choose index for resize
            snake_case__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
            if size == 0:
                return img
            # Scale factor that maps the shorter edge to the sampled size.
            snake_case__ = size * 1.0 / min(UpperCamelCase__ , UpperCamelCase__)
            if h < w:
                snake_case__ , snake_case__ = size, scale * w
            else:
                snake_case__ , snake_case__ = scale * h, size
            # If the longer edge would exceed max_size, rescale both dimensions down.
            if max(UpperCamelCase__ , UpperCamelCase__) > self.max_size:
                snake_case__ = self.max_size * 1.0 / max(UpperCamelCase__ , UpperCamelCase__)
                snake_case__ = newh * scale
                snake_case__ = neww * scale
            # Round to the nearest integer pixel size.
            snake_case__ = int(neww + 0.5)
            snake_case__ = int(newh + 0.5)
            if img.dtype == np.uinta:
                # PIL path for uint8 numpy images.
                snake_case__ = Image.fromarray(UpperCamelCase__)
                snake_case__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
                snake_case__ = np.asarray(UpperCamelCase__)
            else:
                snake_case__ = img.permute(2 , 0 , 1).unsqueeze(0)  # 3, 0, 1) # hw(c) -> nchw
                snake_case__ = nn.functional.interpolate(
                    UpperCamelCase__ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase__).squeeze(0)
            img_augs.append(UpperCamelCase__)
        return img_augs
class _lowerCAmelCase :
    """Preprocessing pipeline: load/convert images, resize the shortest edge,
    normalize with the configured pixel mean/std, and pad into a single batch.

    NOTE(review): ``snake_case__`` assignment targets were lost in the automated
    rename (presumably ``self.aug``/``self.input_format``/... given the reads
    below) — confirm against the upstream ``Preprocess`` class.
    """

    def __init__( self : Dict , UpperCamelCase__ : Optional[int]):
        '''Read sizes, padding value, device and normalization constants from the config.'''
        snake_case__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
        snake_case__ = cfg.INPUT.FORMAT
        snake_case__ = cfg.SIZE_DIVISIBILITY
        snake_case__ = cfg.PAD_VALUE
        snake_case__ = cfg.INPUT.MAX_SIZE_TEST
        snake_case__ = cfg.MODEL.DEVICE
        # Per-channel std/mean reshaped to (C, 1, 1) for broadcasting over HxW.
        snake_case__ = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        snake_case__ = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        snake_case__ = lambda UpperCamelCase__: (x - self.pixel_mean) / self.pixel_std

    def __magic_name__ ( self : Dict , UpperCamelCase__ : Dict):
        '''Pad all images to the max H/W in the batch and stack; also return the original sizes.'''
        snake_case__ = tuple(max(UpperCamelCase__) for s in zip(*[img.shape for img in images]))
        snake_case__ = [im.shape[-2:] for im in images]
        snake_case__ = [
            nn.functional.pad(
                UpperCamelCase__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(UpperCamelCase__ , UpperCamelCase__)
        ]
        return torch.stack(UpperCamelCase__), torch.tensor(UpperCamelCase__)

    def __call__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str=False):
        '''Full pipeline; returns (batch, sizes, scale factors), unbatched if single_image.'''
        with torch.no_grad():
            if not isinstance(UpperCamelCase__ , UpperCamelCase__):
                snake_case__ = [images]
            if single_image:
                assert len(UpperCamelCase__) == 1
            for i in range(len(UpperCamelCase__)):
                # Move tensors to the device; load/convert anything else first.
                if isinstance(images[i] , torch.Tensor):
                    images.insert(UpperCamelCase__ , images.pop(UpperCamelCase__).to(self.device).float())
                elif not isinstance(images[i] , torch.Tensor):
                    images.insert(
                        UpperCamelCase__ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase__) , input_format=self.input_format))
                        .to(self.device)
                        .float() , )
            # resize smallest edge
            snake_case__ = torch.tensor([im.shape[:2] for im in images])
            snake_case__ = self.aug(UpperCamelCase__)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            snake_case__ = [self.normalizer(UpperCamelCase__) for x in images]
            # now pad them to do the following operations
            snake_case__ , snake_case__ = self.pad(UpperCamelCase__)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            snake_case__ = torch.true_divide(UpperCamelCase__ , UpperCamelCase__)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _UpperCAmelCase ( boxes : Optional[Any] , scale_yx : Any ):
    """Scale (x1, y1, x2, y2) boxes IN PLACE by per-row (y, x) scale factors.

    FIX: both parameters were declared as ``a`` (duplicate argument names — a
    SyntaxError) while the body referenced ``boxes``/``scale_yx``; restored the
    real names so the helper is callable again.

    Args:
        boxes: tensor of shape (N, 4) in x1,y1,x2,y2 order (modified in place).
        scale_yx: tensor whose column 0 is the y-scale and column 1 the x-scale.
    Returns:
        The same ``boxes`` tensor, for chaining.
    """
    # Even columns (x1, x2) scale with the x factor, odd columns (y1, y2) with y.
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _UpperCAmelCase ( tensor : Any , box_size : Tuple[int, int] ):
    """Clamp (x1, y1, x2, y2) boxes IN PLACE to lie inside an image of ``box_size``.

    FIX: both parameters were declared as ``a`` (duplicate argument names — a
    SyntaxError), the (h, w) unpacking target was mangled, and all four clamps
    used ``a`` as the upper bound. Restored the standard clip: x-coordinates are
    clamped to [0, w] and y-coordinates to [0, h].

    Args:
        tensor: box tensor of shape (N, 4) in x1,y1,x2,y2 order (modified in place).
        box_size: (height, width) of the image.
    """
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
| 654 | 1 |
from pathlib import Path
import fire
from tqdm import tqdm
def _UpperCAmelCase ( src_lang : Any="ro" , tgt_lang : Optional[Any]="en" , dataset : Any="wmt16" , save_dir : Optional[Any]=None ):
    """Download a WMT translation dataset and dump each split to ``<split>.source`` /
    ``<split>.target`` line files (the layout summarization scripts expect).

    FIX: the original declared all four parameters as ``a`` (duplicate argument
    names — a SyntaxError) and its mangled ``snake_case__`` assignments left every
    local (``pair``/``ds``/``fn``/...) undefined; names restored from the reads.

    Args:
        src_lang / tgt_lang: language pair, e.g. "ro"/"en".
        dataset: HF datasets identifier, e.g. "wmt16".
        save_dir: output directory; defaults to ``"<dataset>-<pair>"``.
    """
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("""run pip install datasets""" )
    snake_case__ = F'''{src_lang}-{tgt_lang}'''
    pair = snake_case__
    print(F'''Converting {dataset}-{pair}''' )
    ds = datasets.load_dataset(dataset , pair )
    if save_dir is None:
        save_dir = F'''{dataset}-{pair}'''
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )
    for split in ds.keys():
        print(F'''Splitting {split} with {ds[split].num_rows} records''' )
        # to save to val.source, val.target like summary datasets
        fn = """val""" if split == """validation""" else split
        src_path = save_dir.joinpath(F'''{fn}.source''' )
        tgt_path = save_dir.joinpath(F'''{fn}.target''' )
        src_fp = src_path.open("""w+""" )
        tgt_fp = tgt_path.open("""w+""" )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x["""translation"""]
            src_fp.write(ex[src_lang] + """\n""" )
            tgt_fp.write(ex[tgt_lang] + """\n""" )
    print(F'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
    # CLI entry point: expose the downloader through python-fire.
    # NOTE(review): ``download_wmt_dataset`` is not defined in this file after the
    # automated rename (the function above became ``_UpperCAmelCase``) — confirm.
    fire.Fire(download_wmt_dataset)
| 654 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (original name lost in the automated rename).
a__ = logging.get_logger(__name__)
# Checkpoint name -> config URL map (presumably the pretrained config archive map).
a__ = {
    """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCAmelCase ( PretrainedConfig ):
    """Configuration for a WavLM model: the convolutional feature extractor,
    the transformer encoder, SpecAugment masking, the quantizer used for
    pretraining, and the CTC / sequence-classification / x-vector heads.

    NOTE(review): this block was machine-mangled — the base class was the
    undefined name ``lowercase_`` (restored to ``PretrainedConfig``, imported
    above), every ``__init__`` parameter shared one duplicated name (a
    SyntaxError), and every attribute was written to a local ``snake_case__``
    instead of ``self``.  Parameter names are recovered from the identifiers
    the original body itself reads; defaults keep the original order/values.
    """

    # Key used by the AutoConfig machinery (was the meaningless ``_lowercase``).
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # --- feature extractor + encoder ---
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Total stride of the conv feature extractor, i.e. raw samples per
        # encoder frame.  NOTE(review): the property name was mangled to
        # ``__magic_name__``; restored to the conventional name — confirm upstream.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 654 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
# NOTE(review): every constant below is bound to the same mangled name ``a__``,
# so each rebinding shadows the previous one, while the decorators further down
# read REQUIRE_FAIRSEQ / _has_fairseq / UNSUPPORTED_ON_WINDOWS / _on_windows /
# REQUIRE_TRANSFORMERS / _has_transformers — those are the names that need
# restoring upstream.
a__ = pytest.mark.integration
# metrics that need fairseq installed
a__ = {"""comet"""}
a__ = importlib.util.find_spec("""fairseq""") is not None
# metrics that cannot run on Windows
a__ = {"""code_eval"""}
a__ = os.name == """nt"""
# metrics that need transformers installed
a__ = {"""bertscore""", """frugalscore""", """perplexity"""}
a__ = importlib.util.find_spec("""transformers""") is not None
def _UpperCAmelCase ( a : Tuple ):
@wraps(a )
def wrapper(self : Union[str, Any] , a : Union[str, Any] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("""\"test requires Fairseq\"""" )
else:
test_case(self , a )
return wrapper
def _UpperCAmelCase ( a : str ):
@wraps(a )
def wrapper(self : Union[str, Any] , a : Union[str, Any] ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("""\"test requires transformers\"""" )
else:
test_case(self , a )
return wrapper
def _UpperCAmelCase ( a : Optional[int] ):
@wraps(a )
def wrapper(self : List[Any] , a : Any ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("""\"test not supported on Windows\"""" )
else:
test_case(self , a )
return wrapper
def _UpperCAmelCase ( ):
snake_case__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
# Parameterized test case that doctest-runs every local metric with its heavy
# model calls patched out.
# NOTE(review): heavily mangled — ``get_local_metric_names`` and the three
# ``lowercase_`` skip-decorators are undefined under those names in this file,
# all five methods share the name ``__magic_name__`` (each definition shadows
# the previous), several method signatures duplicate ``UpperCamelCase__`` (a
# SyntaxError), and every local binding targets ``snake_case__`` while later
# lines read the original variable names (``metric_module``, ``metric``,
# ``parameters``, ``results``, ``metric_name``, ``patcher``).  Left byte-for-byte
# as found; the original names must be restored from the upstream file.
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    lowercase_ , lowercase_ , lowercase_ )
@local
class _lowerCAmelCase ( parameterized.TestCase ):
    """simple docstring"""
    # Registry of per-metric context-manager patchers; the metric name under test.
    _lowercase : Optional[Any] = {}
    _lowercase : Optional[Any] = None
    @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""")
    @pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""")
    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Any):
        '''simple docstring'''
        snake_case__ = """[...]"""
        snake_case__ = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("""metrics""" , UpperCamelCase__)).module_path)
        snake_case__ = datasets.load.import_main_class(metric_module.__name__ , dataset=UpperCamelCase__)
        # check parameters
        snake_case__ = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs
        # run doctest
        with self.patch_intensive_calls(UpperCamelCase__ , metric_module.__name__):
            with self.use_local_metrics():
                try:
                    snake_case__ = doctest.testmod(UpperCamelCase__ , verbose=UpperCamelCase__ , raise_on_error=UpperCamelCase__)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1] # raise the exception that doctest caught
        self.assertEqual(results.failed , 0)
        self.assertGreater(results.attempted , 1)
    @slow
    def __magic_name__ ( self : Tuple , UpperCamelCase__ : Dict):
        '''simple docstring'''
        snake_case__ = """[...]"""
        snake_case__ = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("""metrics""" , UpperCamelCase__)).module_path)
        # run doctest
        with self.use_local_metrics():
            snake_case__ = doctest.testmod(UpperCamelCase__ , verbose=UpperCamelCase__ , raise_on_error=UpperCamelCase__)
        self.assertEqual(results.failed , 0)
        self.assertGreater(results.attempted , 1)
    # Context manager: apply the registered patcher for a metric, if any.
    @contextmanager
    def __magic_name__ ( self : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Any):
        '''simple docstring'''
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](UpperCamelCase__):
                yield
        else:
            yield
    # Context manager: redirect ``datasets.load_metric`` to the local ./metrics tree.
    @contextmanager
    def __magic_name__ ( self : List[Any]):
        '''simple docstring'''
        def load_local_metric(UpperCamelCase__ : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : int):
            return load_metric(os.path.join("""metrics""" , UpperCamelCase__) , *UpperCamelCase__ , **UpperCamelCase__)
        with patch("""datasets.load_metric""") as mock_load_metric:
            snake_case__ = load_local_metric
            yield
    # Class-level registration decorator used by the patcher functions below.
    @classmethod
    def __magic_name__ ( cls : int , UpperCamelCase__ : Tuple):
        '''simple docstring'''
        def wrapper(UpperCamelCase__ : Optional[int]):
            snake_case__ = contextmanager(UpperCamelCase__)
            snake_case__ = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def _UpperCAmelCase(a):
    """Patcher: stub BLEURT's TF predictor so no checkpoint is downloaded.

    NOTE(review): the decorator target ``LocalMetricTest`` is not defined under
    that name in this mangled file (the test class above was renamed) — the
    class name needs restoring upstream.  Inside the body, ``MockedPredictor``
    and the ``input_dict`` parameter had been mangled away and the mock's
    return value was assigned to a dead local; all restored.
    """
    import tensorflow.compat.v1 as tf  # was the mangled ``tensorflow.compat.va``
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def _UpperCAmelCase(a):
    """Patcher: stub bert_score's model download and scoring forward pass.

    NOTE(review): the inner function's parameters were all renamed to ``a``
    (duplicate parameters — a SyntaxError) and the stub was assigned to a dead
    local instead of the mock's ``side_effect``; both restored.
    """
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        # one fake similarity row per reference
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def _UpperCAmelCase(a):
    """Patcher: stub comet's checkpoint download and ``Model.predict``.

    NOTE(review): ``scores`` was assigned to a dead ``snake_case__`` local
    while the return statement read it, and both mock wirings
    (``return_value`` / ``side_effect``) had been dropped; restored.
    """
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def _UpperCAmelCase():
    """seqeval must reject an unknown tagging scheme with a descriptive error.

    NOTE(review): the mangled version called ``pytest.raises(a, ...)`` and
    ``scheme=a`` with ``a`` undefined; the locals were also collapsed into one
    ``snake_case__``.  ``ValueError`` is the expected failure mode for a bad
    scheme — TODO confirm against the seqeval metric wrapper.
    """
    metric = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
    wrong_scheme = """ERROR"""
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 654 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( DiffusionPipeline ):
    """Unconditional image-generation pipeline using the variance-exploding
    score SDE sampler (ScoreSdeVe).

    NOTE(review): heavily mangled — the base class was the undefined name
    ``lowercase_`` (restored to ``DiffusionPipeline``, imported above), the
    ``__call__`` parameters shared one duplicated name (a SyntaxError), and the
    locals (``shape``, ``model``, ``sample``, ``sigma_t``, ``output``) were all
    collapsed into ``snake_case__`` while later lines still read the original
    names; all restored from those references.
    """

    # Components filled in by ``register_modules``.
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Sample ``batch_size`` images; returns ``ImagePipelineOutput`` (or a
        1-tuple of images when ``return_dict`` is False)."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # Start from Gaussian noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean of the final step as the image.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
| 654 | 1 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
# Helper that builds tiny XGLM configs/inputs for the TF model tests below.
# NOTE(review): mangled — the ``__init__`` parameters all share the duplicated
# name ``UpperCamelCase__`` (a SyntaxError) and every ``self.<attr> = <param>``
# write was collapsed to ``snake_case__ = <param>``; the right-hand sides
# preserve the original parameter names.  Left byte-for-byte as found.
@require_tf
class _lowerCAmelCase :
    """simple docstring"""
    # config class / extra kwargs / activation used by get_config
    _lowercase : Tuple = XGLMConfig
    _lowercase : Optional[Any] = {}
    _lowercase : Optional[Any] = '''gelu'''
    def __init__( self : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any=1_4 , UpperCamelCase__ : str=7 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Tuple=9_9 , UpperCamelCase__ : Union[str, Any]=3_2 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Any=3_7 , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=5_1_2 , UpperCamelCase__ : List[str]=0.02 , ):
        '''simple docstring'''
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = seq_length
        snake_case__ = is_training
        snake_case__ = use_input_mask
        snake_case__ = use_labels
        snake_case__ = vocab_size
        snake_case__ = d_model
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = ffn_dim
        snake_case__ = activation_function
        snake_case__ = activation_dropout
        snake_case__ = attention_dropout
        snake_case__ = max_position_embeddings
        snake_case__ = initializer_range
        # scope / bos / eos / pad token ids
        snake_case__ = None
        snake_case__ = 0
        snake_case__ = 2
        snake_case__ = 1
    def __magic_name__ ( self : List[str]):
        '''simple docstring'''
        return XGLMConfig.from_pretrained("""facebook/xglm-564M""")
    def __magic_name__ ( self : List[str]):
        '''simple docstring'''
        snake_case__ = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) , clip_value_min=0 , clip_value_max=3)
        snake_case__ = None
        if self.use_input_mask:
            snake_case__ = random_attention_mask([self.batch_size, self.seq_length])
        snake_case__ = self.get_config()
        snake_case__ = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def __magic_name__ ( self : Optional[int]):
        '''simple docstring'''
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=UpperCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=UpperCamelCase__ , )
    def __magic_name__ ( self : Union[str, Any]):
        '''simple docstring'''
        snake_case__ = self.prepare_config_and_inputs()
        (
            (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) ,
        ) = config_and_inputs
        snake_case__ = {
            """input_ids""": input_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
# Common-model-test suite for TF XGLM.
# NOTE(review): mangled — the mixin bases are the undefined ``lowercase_``,
# ``TFXGLMModelTester`` refers to the tester class above (renamed to
# ``_lowerCAmelCase``), and ``config_class=UpperCamelCase__`` references an
# undefined name (presumably ``XGLMConfig`` — TODO confirm).  Left byte-for-byte.
@require_tf
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """simple docstring"""
    _lowercase : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    _lowercase : List[Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
    _lowercase : Union[str, Any] = (
        {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    _lowercase : str = False
    _lowercase : Union[str, Any] = False
    _lowercase : str = False
    def __magic_name__ ( self : int):
        '''simple docstring'''
        snake_case__ = TFXGLMModelTester(self)
        snake_case__ = ConfigTester(self , config_class=UpperCamelCase__ , n_embd=3_7)
    def __magic_name__ ( self : List[Any]):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @slow
    def __magic_name__ ( self : List[str]):
        '''simple docstring'''
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case__ = TFXGLMModel.from_pretrained(UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)
    @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""")
    def __magic_name__ ( self : Optional[int]):
        '''simple docstring'''
        super().test_resize_token_embeddings()
# Slow integration tests exercising real facebook/xglm-564M generation.
# NOTE(review): mangled — all three tests share the method name
# ``__magic_name__``, locals are collapsed into ``snake_case__`` while later
# lines read the original names, ``do_sample=UpperCamelCase__`` /
# ``skip_special_tokens=UpperCamelCase__`` reference undefined names
# (presumably False / True respectively — TODO confirm), and ``tf.intaa``
# is presumably a mangled ``tf.int64``.  Left byte-for-byte as found.
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
    """simple docstring"""
    @slow
    def __magic_name__ ( self : int , UpperCamelCase__ : List[Any]=True):
        '''simple docstring'''
        snake_case__ = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""")
        snake_case__ = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa) # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        snake_case__ = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
        # fmt: on
        snake_case__ = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ , num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , UpperCamelCase__)
    @slow
    def __magic_name__ ( self : Any):
        '''simple docstring'''
        snake_case__ = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""")
        snake_case__ = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""")
        tf.random.set_seed(0)
        snake_case__ = tokenizer("""Today is a nice day and""" , return_tensors="""tf""")
        snake_case__ = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(""":/CPU:0"""):
            snake_case__ = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ , seed=[7, 0])
        snake_case__ = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCamelCase__)
        snake_case__ = (
            """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
        )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__)
    @slow
    def __magic_name__ ( self : Union[str, Any]):
        '''simple docstring'''
        snake_case__ = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""")
        snake_case__ = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""")
        snake_case__ = """left"""
        # use different length sentences to test batching
        snake_case__ = [
            """This is an extremelly long sentence that only exists to test the ability of the model to cope with """
            """left-padding, such as in batched generation. The output for the sequence below should be the same """
            """regardless of whether left padding is applied or not. When""",
            """Hello, my dog is a little""",
        ]
        snake_case__ = tokenizer(UpperCamelCase__ , return_tensors="""tf""" , padding=UpperCamelCase__)
        snake_case__ = inputs["""input_ids"""]
        snake_case__ = model.generate(input_ids=UpperCamelCase__ , attention_mask=inputs["""attention_mask"""] , max_new_tokens=1_2)
        snake_case__ = tokenizer(sentences[0] , return_tensors="""tf""").input_ids
        snake_case__ = model.generate(input_ids=UpperCamelCase__ , max_new_tokens=1_2)
        snake_case__ = tokenizer(sentences[1] , return_tensors="""tf""").input_ids
        snake_case__ = model.generate(input_ids=UpperCamelCase__ , max_new_tokens=1_2)
        snake_case__ = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__)
        snake_case__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCamelCase__)
        snake_case__ = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCamelCase__)
        snake_case__ = [
            """This is an extremelly long sentence that only exists to test the ability of the model to cope with """
            """left-padding, such as in batched generation. The output for the sequence below should be the same """
            """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
            """a single""",
            """Hello, my dog is a little bit of a shy one, but he is very friendly""",
        ]
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
        self.assertListEqual(UpperCamelCase__ , [non_padded_sentence, padded_sentence])
| 654 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
# Fast pipeline tests for IFInpaintingSuperResolutionPipeline.
# NOTE(review): mangled — mixin bases are the undefined ``lowercase_``, every
# method shares the name ``__magic_name__``, the inputs builder's two
# parameters duplicate ``UpperCamelCase__`` (a SyntaxError), and locals are
# collapsed into ``snake_case__`` while the dict below reads the original
# names (``image``, ``original_image``, ``mask_image``, ``generator``,
# ``inputs``).  Left byte-for-byte as found.
@skip_mps
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """simple docstring"""
    _lowercase : Optional[int] = IFInpaintingSuperResolutionPipeline
    _lowercase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    _lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
    _lowercase : int = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def __magic_name__ ( self : Union[str, Any]):
        '''simple docstring'''
        return self._get_superresolution_dummy_components()
    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int]=0):
        '''simple docstring'''
        if str(UpperCamelCase__).startswith("""mps"""):
            snake_case__ = torch.manual_seed(UpperCamelCase__)
        else:
            snake_case__ = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__)
        snake_case__ = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        snake_case__ = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def __magic_name__ ( self : Dict):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
    def __magic_name__ ( self : int):
        '''simple docstring'''
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
    def __magic_name__ ( self : Optional[Any]):
        '''simple docstring'''
        super().test_save_load_floataa(expected_max_diff=1E-1)
    def __magic_name__ ( self : List[Any]):
        '''simple docstring'''
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
    def __magic_name__ ( self : Union[str, Any]):
        '''simple docstring'''
        self._test_save_load_local()
    def __magic_name__ ( self : str):
        '''simple docstring'''
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 654 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a__ = logging.get_logger(__name__)
# Image processor (resize / center-crop / rescale / normalize pipeline).
# NOTE(review): mangled — the base class is the undefined ``lowercase_``
# (presumably ``BaseImageProcessor`` from the imports above — TODO confirm),
# every method's parameters duplicate the single name ``UpperCamelCase__``
# (SyntaxErrors), all methods share the name ``__magic_name__``, and locals
# are collapsed into ``snake_case__`` while later lines read the original
# names (``size``, ``crop_size``, ``images``, ...).  Left byte-for-byte.
class _lowerCAmelCase ( lowercase_ ):
    """simple docstring"""
    _lowercase : Optional[Any] = ['''pixel_values''']
    def __init__( self : Union[str, Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 2_5_5 , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Any , ):
        '''simple docstring'''
        super().__init__(**UpperCamelCase__)
        snake_case__ = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
        snake_case__ = get_size_dict(UpperCamelCase__)
        snake_case__ = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
        snake_case__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name="""crop_size""")
        snake_case__ = do_resize
        snake_case__ = do_rescale
        snake_case__ = do_normalize
        snake_case__ = do_center_crop
        snake_case__ = crop_size
        snake_case__ = size
        snake_case__ = resample
        snake_case__ = rescale_factor
        snake_case__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        snake_case__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    # Resize to a (height, width) dict or relative to the shortest edge.
    def __magic_name__ ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Any , ):
        '''simple docstring'''
        snake_case__ = get_size_dict(UpperCamelCase__)
        if "shortest_edge" in size:
            snake_case__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            snake_case__ = (size["""height"""], size["""width"""])
        else:
            raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''')
        return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__)
    # Center-crop to an exact (height, width).
    def __magic_name__ ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ):
        '''simple docstring'''
        snake_case__ = get_size_dict(UpperCamelCase__)
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__)
    # Multiply pixel values by a scalar rescale factor.
    def __magic_name__ ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : int):
        '''simple docstring'''
        return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__)
    # Normalize with per-channel mean/std.
    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ):
        '''simple docstring'''
        return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__)
    # Full preprocess entry point combining the steps above.
    def __magic_name__ ( self : int , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ):
        '''simple docstring'''
        snake_case__ = do_resize if do_resize is not None else self.do_resize
        snake_case__ = do_rescale if do_rescale is not None else self.do_rescale
        snake_case__ = do_normalize if do_normalize is not None else self.do_normalize
        snake_case__ = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case__ = crop_size if crop_size is not None else self.crop_size
        snake_case__ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" , default_to_square=UpperCamelCase__)
        snake_case__ = resample if resample is not None else self.resample
        snake_case__ = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case__ = image_mean if image_mean is not None else self.image_mean
        snake_case__ = image_std if image_std is not None else self.image_std
        snake_case__ = size if size is not None else self.size
        snake_case__ = get_size_dict(UpperCamelCase__)
        if not is_batched(UpperCamelCase__):
            snake_case__ = [images]
        if not valid_images(UpperCamelCase__):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""")
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        # All transformations expect numpy arrays.
        snake_case__ = [to_numpy_array(UpperCamelCase__) for image in images]
        if do_resize:
            snake_case__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__) for image in images]
        if do_center_crop:
            snake_case__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__) for image in images]
        if do_rescale:
            snake_case__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__) for image in images]
        if do_normalize:
            snake_case__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__) for image in images]
        snake_case__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__) for image in images]
        snake_case__ = {"""pixel_values""": images}
        return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__)
| 654 |
a__ = [0, 2, 4, 6, 8]
a__ = [1, 3, 5, 7, 9]
def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """
    Count how many ways the undecided digits of a ``length``-digit number can
    be filled in so that n + reverse(n) consists only of odd digits
    (Project Euler #145).

    ``digits`` holds the digits fixed so far (mutated in place), ``remainder``
    is the carry accumulated from the outer digit pairs already fixed, and
    ``remaining_length`` is how many positions are still free.

    Note: the original had duplicate parameter names (a SyntaxError) and
    recursed on an undefined name; the parameter names used by the body are
    restored here.
    """
    if remaining_length == 0:
        # All digits chosen: reject leading zeros (in n or its reversal),
        # then verify every digit of n + reverse(n) is odd.
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        # Odd total length: the middle digit is added to itself, so its
        # doubled contribution is even — only an odd incoming carry can make
        # the middle digit of the sum odd.
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    # General case: pick the outermost undecided pair. The two digits must
    # have opposite parity (given the carry) so their sum digit is odd.
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result
def solution(max_power: int = 9) -> int:
    """
    Sum the counts of reversible numbers for every digit length from 1 up to
    ``max_power`` (Project Euler #145 asks for all numbers below 10**9).

    The original passed the *power* where the recursion expects the current
    *length*, and its own name never matched the call site below.
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 654 | 1 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
# Scripts under `examples/by_feature` that `one_complete_example` below must
# skip: they either have no `complete_*` counterpart or cannot run in CI.
# (The list is looked up as EXCLUDE_EXAMPLES later in this file.)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    Checks that every `complete_*` example script contains all of the
    information found in the `by_feature` scripts, line for line. If one of
    these tests fails, a complete example is missing a feature shown in a
    feature script and should be updated.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        """
        Diff a single `complete` example against all implemented `by_feature`
        scripts.

        Args:
            complete_file_name: filename of the complete example under `examples/`.
            parser_only: compare only `main()` when True, otherwise
                `training_function()`.
            secondary_filename: optional secondary base file used to strip
                script-specific content (e.g. "cv_example.py").
            special_strings: optional file-specific diff fragments to remove
                before asserting the diff is empty.
        """
        self.maxDiff = None  # show full diffs on failure
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        # Check both the argument-parser section and the training function.
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        # Logging fragments that legitimately differ between the cv scripts.
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    """
    Launches each `examples/by_feature` script through `accelerate launch`
    and checks its observable side effects (checkpoints, stdout, tracker
    directories).
    """

    # NOTE(review): assumed to disable per-test tmpdir clearing so checkpoint
    # tests can resume from earlier runs — confirm against TempDirTestCase.
    clear_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        # Resuming after epoch 0 must skip it entirely.
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            # With >1 process the resumed step already completes epoch 0.
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            # Grab the last dict-looking log line that reports an accuracy.
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 654 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
# Plain-English language name -> FLORES-200 code used by NLLB-200.
# The TranslationTool below looks this mapping up as LANGUAGE_CODES.
LANGUAGE_CODES = {
    "Acehnese Arabic": "ace_Arab",
    "Acehnese Latin": "ace_Latn",
    "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab",
    "Tunisian Arabic": "aeb_Arab",
    "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab",
    "Akan": "aka_Latn",
    "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab",
    "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn",
    "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab",
    "Egyptian Arabic": "arz_Arab",
    "Assamese": "asm_Beng",
    "Asturian": "ast_Latn",
    "Awadhi": "awa_Deva",
    "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab",
    "North Azerbaijani": "azj_Latn",
    "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn",
    "Balinese": "ban_Latn",
    "Belarusian": "bel_Cyrl",
    "Bemba": "bem_Latn",
    "Bengali": "ben_Beng",
    "Bhojpuri": "bho_Deva",
    "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn",
    "Standard Tibetan": "bod_Tibt",
    "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn",
    "Bulgarian": "bul_Cyrl",
    "Catalan": "cat_Latn",
    "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn",
    "Chokwe": "cjk_Latn",
    "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn",
    "Welsh": "cym_Latn",
    "Danish": "dan_Latn",
    "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn",
    "Dyula": "dyu_Latn",
    "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek",
    "English": "eng_Latn",
    "Esperanto": "epo_Latn",
    "Estonian": "est_Latn",
    "Basque": "eus_Latn",
    "Ewe": "ewe_Latn",
    "Faroese": "fao_Latn",
    "Fijian": "fij_Latn",
    "Finnish": "fin_Latn",
    "Fon": "fon_Latn",
    "French": "fra_Latn",
    "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn",
    "Scottish Gaelic": "gla_Latn",
    "Irish": "gle_Latn",
    "Galician": "glg_Latn",
    "Guarani": "grn_Latn",
    "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn",
    "Hausa": "hau_Latn",
    "Hebrew": "heb_Hebr",
    "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva",
    "Croatian": "hrv_Latn",
    "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn",
    "Igbo": "ibo_Latn",
    "Ilocano": "ilo_Latn",
    "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn",
    "Italian": "ita_Latn",
    "Javanese": "jav_Latn",
    "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn",
    "Jingpho": "kac_Latn",
    "Kamba": "kam_Latn",
    "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab",
    "Kashmiri Devanagari": "kas_Deva",
    "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab",
    "Central Kanuri Latin": "knc_Latn",
    "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn",
    "Kabuverdianu": "kea_Latn",
    "Khmer": "khm_Khmr",
    "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn",
    "Kyrgyz": "kir_Cyrl",
    "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn",
    "Kikongo": "kon_Latn",
    "Korean": "kor_Hang",
    "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn",
    "Limburgish": "lim_Latn",
    "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn",
    "Lombard": "lmo_Latn",
    "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn",
    "Luba-Kasai": "lua_Latn",
    "Ganda": "lug_Latn",
    "Luo": "luo_Latn",
    "Mizo": "lus_Latn",
    "Standard Latvian": "lvs_Latn",
    "Magahi": "mag_Deva",
    "Maithili": "mai_Deva",
    "Malayalam": "mal_Mlym",
    "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab",
    "Minangkabau Latin": "min_Latn",
    "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn",
    "Maltese": "mlt_Latn",
    "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl",
    "Mossi": "mos_Latn",
    "Maori": "mri_Latn",
    "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn",
    "Norwegian Nynorsk": "nno_Latn",
    "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva",
    "Northern Sotho": "nso_Latn",
    "Nuer": "nus_Latn",
    "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn",
    "West Central Oromo": "gaz_Latn",
    "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn",
    "Eastern Panjabi": "pan_Guru",
    "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab",
    "Polish": "pol_Latn",
    "Portuguese": "por_Latn",
    "Dari": "prs_Arab",
    "Southern Pashto": "pbt_Arab",
    "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn",
    "Rundi": "run_Latn",
    "Russian": "rus_Cyrl",
    "Sango": "sag_Latn",
    "Sanskrit": "san_Deva",
    "Santali": "sat_Olck",
    "Sicilian": "scn_Latn",
    "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh",
    "Slovak": "slk_Latn",
    "Slovenian": "slv_Latn",
    "Samoan": "smo_Latn",
    "Shona": "sna_Latn",
    "Sindhi": "snd_Arab",
    "Somali": "som_Latn",
    "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn",
    "Tosk Albanian": "als_Latn",
    "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl",
    "Swati": "ssw_Latn",
    "Sundanese": "sun_Latn",
    "Swedish": "swe_Latn",
    "Swahili": "swh_Latn",
    "Silesian": "szl_Latn",
    "Tamil": "tam_Taml",
    "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu",
    "Tajik": "tgk_Cyrl",
    "Tagalog": "tgl_Latn",
    "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi",
    "Tamasheq Latin": "taq_Latn",
    "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn",
    "Tswana": "tsn_Latn",
    "Tsonga": "tso_Latn",
    "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn",
    "Turkish": "tur_Latn",
    "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng",
    "Uyghur": "uig_Arab",
    "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn",
    "Urdu": "urd_Arab",
    "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn",
    "Vietnamese": "vie_Latn",
    "Waray": "war_Latn",
    "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn",
    "Eastern Yiddish": "ydd_Hebr",
    "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant",
    "Chinese Simplified": "zho_Hans",
    "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn",
    "Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    """
    Agent tool wrapping the NLLB-200 distilled translation model.

    Takes `text`, a plain-English `src_lang` and `tgt_lang`, maps the
    language names through LANGUAGE_CODES, and returns the translation.
    """

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM

    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        # Validate the plain-English names before mapping them to NLLB codes.
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 654 | 1 |
def _UpperCAmelCase ( a : int ):
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """
    Return a representative linear layer of ``model`` for quantization checks:
    the first MLP input projection for GPT-2, otherwise bloom-style models'
    first MLP output projection (`dense_4h_to_h` is the real bloom attribute;
    the previous `dense_ah_to_h` does not exist on any transformers model).
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """
    Wraps a linear layer with a LoRA-like low-rank adapter — used for testing
    purposes only. The second adapter matrix is zero-initialized, so a freshly
    wrapped layer behaves exactly like the wrapped module.
    """

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        # Scaled-normal init for the down-projection; matches the variance
        # heuristic used by the original test.
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    """
    Shared fixtures for the 4-bit quantization tests below: the model name,
    the expected fp16/4-bit memory ratio, the prompt, and its accepted
    generations.
    """

    # A >1b-parameter model so quantization behaves as expected.
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109_6595_5269_2574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Tokenizer is cheap; quantized models are loaded in subclasses.
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        # Free GPU memory and cache after each test to avoid cross-test effects.
        del self.model_fp16
        del self.model_4bit
        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        """The quantization config must round-trip through its serializers."""
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        """Conversion shrinks memory by the expected ratio and swaps weight class."""
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()
        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        """All quantizable linear weights are packed into uint8 storage."""
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        """The quantized model still produces one of the expected generations."""
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        """Loading via an explicit BitsAndBytesConfig is equivalent."""
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        # NOTE(review): serializing 4-bit models is unsupported; the exact
        # exception type was lost in this file — NotImplementedError matches
        # upstream transformers at the time. Confirm against the pinned version.
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        """Passing both a quantization_config and load_in_4bit must error."""
        bnb_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        """Casting or moving a 4-bit model must fail; fp16 model stays usable."""
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")
        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)
        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))
        with self.assertRaises(ValueError):
            # Tries casting to float
            self.model_4bit.float()
        with self.assertRaises(ValueError):
            # Tries casting to half
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")
        # Check this does not throw an error
        _ = self.model_fp16.half()
        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        """`keep_in_fp32_modules` weights of T5 stay in float32 after 4-bit load."""
        model = AutoModelForSeqaSeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    """4-bit loading of T5 variants, with and without `keep_in_fp32_modules`."""

    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        # Free GPU memory and cache after each test.
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        """
        4-bit + fp32 mixing must also work with `_keep_in_fp32_modules`
        disabled; both the dense-relu-dense (`t5-small`) and dense-act
        (`flan-t5-small`) variants are exercised.
        """
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        # Restore the class attribute for other tests.
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        """Default 4-bit T5 load: decoder linears are Linear4bit and generate runs."""
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    """Checks 4-bit conversion across different auto-model head classes."""

    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        # Free GPU memory and cache after each test.
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        """Backbone weights are quantized; head weights stay nn.Parameter."""
        from bitsandbytes.nn import Params4bit

        # `dense_4h_to_h` is the real bloom MLP attribute name.
        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    """Runs a text-generation pipeline on top of a 4-bit loaded model."""

    def setUp(self):
        super().setUp()

    def tearDown(self):
        # Free GPU memory and cache after each test.
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    """Loads the 4-bit model balanced across two GPUs and runs inference."""

    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    """Checks that LoRA adapters attached to a frozen 4-bit model get gradients."""

    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        # Adapter training requires a recent-enough bitsandbytes.
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters on the attention projections
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                # Adapters must receive non-trivial gradients.
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                # Frozen embeddings must not.
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    """Reruns the full Bnb4BitTest suite against GPT-2 XL."""

    model_name = "gpt2-xl"
    # Expected fp16/4-bit memory ratio for gpt2-xl.
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191_8548_5415_2187
| 654 | 1 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """
    Utility class storing (optionally learned) text embeddings used for
    classifier-free sampling in VQ-Diffusion.
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        # Wrapping in a Parameter makes the embeddings trainable when learnable.
        self.embeddings = torch.nn.Parameter(embeddings)
class _lowerCAmelCase ( lowercase_ ):
    """VQ-Diffusion-style text-to-image pipeline.

    Combines a CLIP tokenizer/text encoder, a discrete-diffusion
    transformer, a VQ-VAE decoder and a VQ-diffusion scheduler.

    NOTE(review): the automated rename collapsed attribute/local names to
    ``snake_case__`` and parameters to ``UpperCamelCase__`` (multi-argument
    defs therefore declare duplicate parameter names — a SyntaxError).
    Code kept byte-identical; docstrings/comments describe the behaviour
    inferred from the surviving structure.
    """

    # Expected component types (the attribute names were lost in the rename).
    _lowercase : VQModel
    _lowercase : CLIPTextModel
    _lowercase : CLIPTokenizer
    _lowercase : TransformeraDModel
    _lowercase : LearnedClassifierFreeSamplingEmbeddings
    _lowercase : VQDiffusionScheduler

    def __init__( self : Any , UpperCamelCase__ : VQModel , UpperCamelCase__ : CLIPTextModel , UpperCamelCase__ : CLIPTokenizer , UpperCamelCase__ : TransformeraDModel , UpperCamelCase__ : VQDiffusionScheduler , UpperCamelCase__ : LearnedClassifierFreeSamplingEmbeddings , ):
        """Register all sub-modules so they move/save with the pipeline."""
        super().__init__()
        self.register_modules(
            vqvae=UpperCamelCase__ , transformer=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , )

    def __magic_name__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : Dict):
        """Encode the prompt into CLIP embeddings (with optional
        classifier-free-guidance negatives concatenated in front).

        Presumably ``(prompt, num_images_per_prompt,
        do_classifier_free_guidance)`` — matches the ``self._encode_prompt``
        call in ``__call__``; confirm upstream.
        """
        snake_case__ = len(UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else 1
        # get prompt text embeddings
        snake_case__ = self.tokenizer(
            UpperCamelCase__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        snake_case__ = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # Warn about (and drop) any tokens beyond CLIP's max sequence length.
            snake_case__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''')
            snake_case__ = text_input_ids[:, : self.tokenizer.model_max_length]
        snake_case__ = self.text_encoder(text_input_ids.to(self.device))[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        snake_case__ = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase__)
        # duplicate text embeddings for each generation per prompt
        snake_case__ = prompt_embeds.repeat_interleave(UpperCamelCase__ , dim=0)
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                # Use the learned unconditional embeddings.
                snake_case__ = self.learned_classifier_free_sampling_embeddings.embeddings
                snake_case__ = negative_prompt_embeds.unsqueeze(0).repeat(UpperCamelCase__ , 1 , 1)
            else:
                # Fall back to encoding the empty string as the unconditional input.
                snake_case__ = [""""""] * batch_size
                snake_case__ = text_input_ids.shape[-1]
                snake_case__ = self.tokenizer(
                    UpperCamelCase__ , padding="""max_length""" , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="""pt""" , )
                snake_case__ = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                snake_case__ = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase__)
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            snake_case__ = negative_prompt_embeds.shape[1]
            snake_case__ = negative_prompt_embeds.repeat(1 , UpperCamelCase__ , 1)
            snake_case__ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase__ , -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            snake_case__ = torch.cat([negative_prompt_embeds, prompt_embeds])
        return prompt_embeds

    @torch.no_grad()
    def __call__( self : str , UpperCamelCase__ : Union[str, List[str]] , UpperCamelCase__ : int = 1_0_0 , UpperCamelCase__ : float = 5.0 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : int = 1 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase__ : int = 1 , ):
        """Generate images: encode prompt, run the discrete denoising loop,
        then decode latent indices with the VQ-VAE.

        Parameter order (inferred from the defaults): prompt,
        num_inference_steps, guidance_scale, truncation_rate,
        num_images_per_prompt, generator, latents, output_type, return_dict,
        callback, callback_steps — confirm against the upstream pipeline.
        """
        if isinstance(UpperCamelCase__ , UpperCamelCase__):
            snake_case__ = 1
        elif isinstance(UpperCamelCase__ , UpperCamelCase__):
            snake_case__ = len(UpperCamelCase__)
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase__)}''')
        snake_case__ = batch_size * num_images_per_prompt
        # Guidance is only active when the scale exceeds 1.
        snake_case__ = guidance_scale > 1.0
        snake_case__ = self._encode_prompt(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(UpperCamelCase__ , UpperCamelCase__) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(UpperCamelCase__)}.''')
        # get the initial completely masked latents unless the user supplied it
        snake_case__ = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # The "masked" class is the last embedding index.
            snake_case__ = self.transformer.num_vector_embeds - 1
            snake_case__ = torch.full(UpperCamelCase__ , UpperCamelCase__).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    """Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
                    F''' {self.transformer.num_vector_embeds - 1} (inclusive).''')
            snake_case__ = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(UpperCamelCase__ , device=self.device)
        snake_case__ = self.scheduler.timesteps.to(self.device)
        snake_case__ = latents
        for i, t in enumerate(self.progress_bar(UpperCamelCase__)):
            # expand the sample if we are doing classifier free guidance
            snake_case__ = torch.cat([sample] * 2) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            snake_case__ = self.transformer(UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , timestep=UpperCamelCase__).sample
            if do_classifier_free_guidance:
                # Combine unconditional/conditional halves, then renormalize in log space.
                snake_case__ , snake_case__ = model_output.chunk(2)
                snake_case__ = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(UpperCamelCase__ , dim=1 , keepdim=UpperCamelCase__)
            snake_case__ = self.truncate(UpperCamelCase__ , UpperCamelCase__)
            # remove `log(0)`'s (`-inf`s)
            snake_case__ = model_output.clamp(-7_0)
            # compute the previous noisy sample x_t -> x_t-1
            snake_case__ = self.scheduler.step(UpperCamelCase__ , timestep=UpperCamelCase__ , sample=UpperCamelCase__ , generator=UpperCamelCase__).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        # Decode the final latent indices back to pixel space via the VQ-VAE.
        snake_case__ = self.vqvae.config.vq_embed_dim
        snake_case__ = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        snake_case__ = self.vqvae.quantize.get_codebook_entry(UpperCamelCase__ , shape=UpperCamelCase__)
        snake_case__ = self.vqvae.decode(UpperCamelCase__ , force_not_quantize=UpperCamelCase__).sample
        # Map from [-1, 1] to [0, 1] and to NHWC numpy.
        snake_case__ = (image / 2 + 0.5).clamp(0 , 1)
        snake_case__ = image.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            snake_case__ = self.numpy_to_pil(UpperCamelCase__)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=UpperCamelCase__)

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float):
        """Zero out (in log space) the lowest-probability classes so that the
        kept probability mass per latent pixel is at least the truncation
        rate; presumably ``(log_p_x_0, truncation_rate)``."""
        snake_case__ , snake_case__ = torch.sort(UpperCamelCase__ , 1 , descending=UpperCamelCase__)
        snake_case__ = torch.exp(UpperCamelCase__)
        snake_case__ = sorted_p_x_0.cumsum(dim=1) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        snake_case__ = torch.full_like(keep_mask[:, 0:1, :] , UpperCamelCase__)
        snake_case__ = torch.cat((all_true, keep_mask) , dim=1)
        snake_case__ = keep_mask[:, :-1, :]
        # Undo the sort so the mask lines up with the original class order.
        snake_case__ = keep_mask.gather(1 , indices.argsort(1))
        snake_case__ = log_p_x_0.clone()
        snake_case__ = -torch.inf # -inf = log(0)
        return rv
| 654 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# Augmentation-script configuration.
# NOTE(review): the automated rename collapsed the original three directory
# constants (label dir / image dir / output dir) into the single name `a__`
# and emptied the paths, so the code below that reads them cannot work as
# written — restore real directory paths before running.
a__ = """"""
a__ = """"""
a__ = """"""
a__ = 1  # (0 is vertical, 1 is horizontal)
def _UpperCAmelCase ( ):
    """Entry point: flip every dataset image and write flipped images plus
    updated YOLO label files to the output directory.

    NOTE(review): the mangled body reads the undefined name ``a`` where the
    original passed the directory constants, and assigns every local to
    ``snake_case__`` — control flow is preserved verbatim for reference but
    the function cannot run as written.
    """
    snake_case__ , snake_case__ = get_dataset(a , a )
    print("""Processing...""" )
    snake_case__ , snake_case__ , snake_case__ = update_image_and_anno(a , a , a )
    for index, image in enumerate(a ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        snake_case__ = random_chars(32 )
        snake_case__ = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        snake_case__ = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        # JPEG quality 85: small files with little visible loss.
        cva.imwrite(F'''/{file_root}.jpg''' , a , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F'''Success {index+1}/{len(a )} with {file_name}''' )
        snake_case__ = []
        for anno in new_annos[index]:
            snake_case__ = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(a )
        with open(F'''/{file_root}.txt''' , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def _UpperCAmelCase ( a : str , a : str ):
snake_case__ = []
snake_case__ = []
for label_file in glob.glob(os.path.join(a , """*.txt""" ) ):
snake_case__ = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(a ) as in_file:
snake_case__ = in_file.readlines()
snake_case__ = os.path.join(a , F'''{label_name}.jpg''' )
snake_case__ = []
for obj_list in obj_lists:
snake_case__ = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(a )
labels.append(a )
return img_paths, labels
def _UpperCAmelCase ( img_list : list , anno_list : list , flip_type : int = 1 ):
    """Flip all images and mirror their YOLO bounding boxes accordingly.

    Bug fixes relative to the mangled original: the signature declared
    ``a`` three times (a SyntaxError) and every local was collapsed to
    ``snake_case__``, losing the bindings the body reads.

    Args:
        img_list: list of image file paths.
        anno_list: parallel list of box lists
            (``[class_id, x_center, y_center, width, height]``).
        flip_type: cv2 flip code — 1 flips horizontally, 0 vertically.

    Returns:
        ``(new_imgs_list, new_annos_lists, path_list)``.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # Horizontal flip mirrors the x center.
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # Vertical flip mirrors the y center.
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def _UpperCAmelCase ( a : int = 32 ):
assert number_char > 1, "The number of character should greater than 1"
snake_case__ = ascii_lowercase + digits
return "".join(random.choice(a ) for _ in range(a ) )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this file after the automated
    # rename (the entry point above became ``_UpperCAmelCase``) — running the
    # script as-is raises NameError.
    main()
    print("""DONE ✅""")
| 654 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _lowerCAmelCase ( lowercase_ ):
    """Torch Dataset of pre-tokenized sequences for LM distillation.

    Holds a ``token_ids`` object array with a parallel ``lengths`` array and
    cleans the data on construction: splits over-long sequences, drops very
    short ones, and drops sequences dominated by the unknown token.

    NOTE(review): the automated rename collapsed parameters to
    ``UpperCamelCase__`` (so several defs declare duplicate parameter names —
    a SyntaxError) and every assignment target to ``snake_case__``; the body
    still references the intended original names (``params``, ``data``,
    ``index``, ...). Code kept verbatim; comments document intent only.
    """

    def __init__( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]):
        """Store params/token data, then run the cleanup passes."""
        snake_case__ = params
        snake_case__ = np.array(UpperCamelCase__)
        snake_case__ = np.array([len(UpperCamelCase__) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__( self : Dict , UpperCamelCase__ : Any):
        """Return ``(token_ids, length)`` for one sequence."""
        return (self.token_ids[index], self.lengths[index])

    def __len__( self : Union[str, Any]):
        """Number of sequences currently kept."""
        return len(self.lengths)

    def __magic_name__ ( self : str):
        """Assert that token arrays and recorded lengths stay consistent."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def __magic_name__ ( self : Optional[int]):
        """Split sequences longer than the model's max input size into
        chunks, re-adding the boundary tokens (cls/sep or bos/eos) on each
        chunk."""
        snake_case__ = self.params.max_model_input_size
        snake_case__ = self.lengths > max_len
        logger.info(F'''Splitting {sum(UpperCamelCase__)} too long sequences.''')

        def divide_chunks(UpperCamelCase__ : str , UpperCamelCase__ : Tuple):
            # Successive fixed-size slices of the sequence.
            return [l[i : i + n] for i in range(0 , len(UpperCamelCase__) , UpperCamelCase__)]

        snake_case__ = []
        snake_case__ = []
        if self.params.mlm:
            snake_case__ , snake_case__ = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
        else:
            snake_case__ , snake_case__ = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
        for seq_, len_ in zip(self.token_ids , self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                snake_case__ = []
                # max_len - 2 leaves room for the re-added boundary tokens.
                for sub_s in divide_chunks(seq_ , max_len - 2):
                    if sub_s[0] != cls_id:
                        snake_case__ = np.insert(UpperCamelCase__ , 0 , UpperCamelCase__)
                    if sub_s[-1] != sep_id:
                        snake_case__ = np.insert(UpperCamelCase__ , len(UpperCamelCase__) , UpperCamelCase__)
                    assert len(UpperCamelCase__) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(UpperCamelCase__)
                new_tok_ids.extend(UpperCamelCase__)
                new_lengths.extend([len(UpperCamelCase__) for l in sub_seqs])
        snake_case__ = np.array(UpperCamelCase__)
        snake_case__ = np.array(UpperCamelCase__)

    def __magic_name__ ( self : Any):
        """Drop sequences of 11 tokens or fewer."""
        snake_case__ = len(self)
        snake_case__ = self.lengths > 1_1
        snake_case__ = self.token_ids[indices]
        snake_case__ = self.lengths[indices]
        snake_case__ = len(self)
        logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''')

    def __magic_name__ ( self : List[str]):
        """Drop sequences in which 50% or more of the tokens are <unk>."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            snake_case__ = self.params.special_tok_ids["""unk_token"""]
            snake_case__ = len(self)
            snake_case__ = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
            snake_case__ = (unk_occs / self.lengths) < 0.5
            snake_case__ = self.token_ids[indices]
            snake_case__ = self.lengths[indices]
            snake_case__ = len(self)
            logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''')

    def __magic_name__ ( self : Optional[Any]):
        """Log dataset statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(F'''{len(self)} sequences''')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def __magic_name__ ( self : int , UpperCamelCase__ : Optional[int]):
        """Collate ``(token_ids, length)`` pairs into padded tensors."""
        snake_case__ = [t[0] for t in batch]
        snake_case__ = [t[1] for t in batch]
        assert len(UpperCamelCase__) == len(UpperCamelCase__)
        # Max for paddings
        snake_case__ = max(UpperCamelCase__)
        # Pad token ids
        if self.params.mlm:
            snake_case__ = self.params.special_tok_ids["""pad_token"""]
        else:
            snake_case__ = self.params.special_tok_ids["""unk_token"""]
        snake_case__ = [list(t.astype(UpperCamelCase__)) + [pad_idx] * (max_seq_len_ - len(UpperCamelCase__)) for t in token_ids]
        assert len(tk_) == len(UpperCamelCase__)
        assert all(len(UpperCamelCase__) == max_seq_len_ for t in tk_)
        snake_case__ = torch.tensor(tk_) # (bs, max_seq_len_)
        snake_case__ = torch.tensor(UpperCamelCase__) # (bs)
        return tk_t, lg_t
| 654 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# Benchmark configuration.
# NOTE(review): the rename collapsed the original constants (number of
# examples, results base path, results filename) into `a__`, so the third
# line references undefined names (RESULTS_BASEPATH / RESULTS_FILENAME) and
# fails at import time as written.
a__ = 5_0_0_0_0_0
a__ , a__ = os.path.split(__file__)
a__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def _UpperCAmelCase ( dataset : datasets.Dataset , **kwargs ):
    """Timed wrapper around ``Dataset.map`` (duration is measured by the
    ``get_duration`` decorator imported from utils).

    Bug fix: the mangled signature declared two parameters both named ``a``
    (a SyntaxError) and the body referenced names it never bound.
    """
    _ = dataset.map(**kwargs)
@get_duration
def _UpperCAmelCase ( dataset : datasets.Dataset , **kwargs ):
    """Timed wrapper around ``Dataset.filter`` (duration is measured by the
    ``get_duration`` decorator imported from utils).

    Bug fix: the mangled signature declared two parameters both named ``a``
    (a SyntaxError) and the body referenced names it never bound.
    """
    _ = dataset.filter(**kwargs)
def _UpperCAmelCase ( ):
    """Benchmark Dataset.map/filter across output formats and write the
    timings to a JSON results file.

    NOTE(review): after the automated rename the body reads the undefined
    name ``a`` everywhere and shadows the builtins ``map``/``filter`` with
    the timed wrappers above; kept verbatim for reference — it cannot run
    as written.
    """
    snake_case__ = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        snake_case__ = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        snake_case__ = generate_example_dataset(
            os.path.join(a , """dataset.arrow""" ) , a , num_examples=a )
        snake_case__ = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=a )

        def tokenize(a : Union[str, Any] ):
            return tokenizer(examples["""text"""] )

        snake_case__ = map(a )
        snake_case__ = map(a , batched=a )
        snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""numpy""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""pandas""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        snake_case__ = map(a , function=a , batched=a )
        snake_case__ = filter(a )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(a , """wb""" ) as f:
            f.write(json.dumps(a ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
    # NOTE(review): ``benchmark_map_filter`` is not defined under that name
    # after the automated rename — running as a script raises NameError.
    benchmark_map_filter()
| 654 | 1 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
a__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowercase_ ):
    """Audio feature extractor producing patched log-mel spectrograms:
    pads/truncates per-example spectrograms to a common patch grid and
    returns ``audio_values`` plus an ``audio_mask``.

    NOTE(review): parameters were collapsed to ``UpperCamelCase__`` by the
    automated rename (so ``__init__``/``__call__`` declare duplicate
    parameter names — a SyntaxError) and locals to ``snake_case__``; the
    body references the intended original names. Code kept verbatim.
    """

    # keys present in the returned BatchFeature
    _lowercase : int = ['''audio_values''', '''audio_mask''']

    def __init__( self : List[str] , UpperCamelCase__ : Dict=2_0_4_8 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : List[str]=[1_6, 1_6] , UpperCamelCase__ : Any=1_2_8 , UpperCamelCase__ : str=4_4_1_0_0 , UpperCamelCase__ : Dict=8_6 , UpperCamelCase__ : str=2_0_4_8 , UpperCamelCase__ : Optional[int]=0.0 , **UpperCamelCase__ : Any , ):
        """Configure spectrogram geometry and build the mel filter bank."""
        super().__init__(
            feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , **UpperCamelCase__ , )
        snake_case__ = spectrogram_length
        snake_case__ = num_channels
        snake_case__ = patch_size
        # number of patches along the frequency axis
        snake_case__ = feature_size // self.patch_size[1]
        snake_case__ = n_fft
        snake_case__ = sampling_rate // hop_length_to_sampling_rate
        snake_case__ = sampling_rate
        snake_case__ = padding_value
        snake_case__ = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCamelCase__ , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=UpperCamelCase__ , norm="""slaney""" , mel_scale="""slaney""" , ).T

    def __magic_name__ ( self : str , UpperCamelCase__ : np.array):
        """Compute a dB-scaled log-mel spectrogram normalized to [-1, 1]."""
        snake_case__ = spectrogram(
            UpperCamelCase__ , window_function(self.n_fft , """hann""") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
        snake_case__ = log_spec[:, :-1]
        snake_case__ = log_spec - 20.0
        snake_case__ = np.clip(log_spec / 40.0 , -2.0 , 0.0) + 1.0
        return log_spec

    def __call__( self : Dict , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[bool] = True , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , **UpperCamelCase__ : int , ):
        """Featurize raw speech into padded spectrogram batches.

        Presumably ``(raw_speech, return_tensors, return_attention_mask,
        sampling_rate, ...)`` — confirm against the upstream extractor.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    F''' with {self.sampling_rate} and not {sampling_rate}.''')
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""")
        snake_case__ = isinstance(UpperCamelCase__ , np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''')
        snake_case__ = is_batched_numpy or (
            isinstance(UpperCamelCase__ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
        )
        if is_batched:
            snake_case__ = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
        elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray):
            snake_case__ = np.asarray(UpperCamelCase__ , dtype=np.floataa)
        elif isinstance(UpperCamelCase__ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
            snake_case__ = raw_speech.astype(np.floataa)
        # always return batch
        if not is_batched:
            snake_case__ = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        snake_case__ = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , UpperCamelCase__):
            snake_case__ = [np.asarray(UpperCamelCase__ , dtype=np.floataa) for feature in audio_features]
        # Create audio attention mask
        snake_case__ = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]) # The maximum number of audio patches in a batch
        if return_attention_mask:
            snake_case__ = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            snake_case__ = np.array(UpperCamelCase__).astype(np.floataa)
        # convert into correct format for padding
        snake_case__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        snake_case__ = np.ones([len(UpperCamelCase__), 1, max_time_len, self.feature_size]).astype(np.floataa)
        snake_case__ = padded_audio_features * self.padding_value
        for i in range(len(UpperCamelCase__)):
            snake_case__ = audio_features[i]
            snake_case__ = feature
        # return as BatchFeature
        if return_attention_mask:
            snake_case__ = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            snake_case__ = {"""audio_values""": padded_audio_features}
        snake_case__ = BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__)
        return encoded_inputs
| 654 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def _UpperCAmelCase ( a : List[str] , a : Any=False ):
snake_case__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def _UpperCAmelCase ( a : int , a : List[Any] , a : Union[str, Any]=False ):
    """Split each timm layer's fused qkv projection into separate query /
    key / value slices.

    NOTE(review): the mangled signature declares ``a`` three times (a
    SyntaxError), and the automated rename erased the assignment targets —
    the original wrote each slice back into the state dict under the
    HuggingFace q/k/v key names, but here every slice is assigned to the
    throwaway ``snake_case__``, so the function is a no-op on the dict apart
    from the two pops. Kept verbatim; the slicing structure documents the
    intended layout (q = first hidden_size rows, k = next, v = last).
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            snake_case__ = """"""
        else:
            snake_case__ = """deit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        snake_case__ = in_proj_weight[
            : config.hidden_size, :
        ]
        snake_case__ = in_proj_bias[: config.hidden_size]
        snake_case__ = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        snake_case__ = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        snake_case__ = in_proj_weight[
            -config.hidden_size :, :
        ]
        snake_case__ = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( a : Dict , a : Union[str, Any] , a : int ):
snake_case__ = dct.pop(a )
snake_case__ = val
def _UpperCAmelCase ( ):
    """Fetch the standard COCO sanity-check image (two cats on a couch).

    NOTE(review): the automated rename broke this body — ``requests.get``
    receives the undefined name ``a`` (originally the URL and
    ``stream=True``) and the function returns the never-bound ``im``.
    Kept verbatim for reference.
    """
    snake_case__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    snake_case__ = Image.open(requests.get(a , stream=a ).raw )
    return im
@torch.no_grad()
def _UpperCAmelCase ( a : List[str] , a : Tuple ):
    """Convert a timm DeiT checkpoint to the HuggingFace format: build the
    config from the model name, remap the state dict, verify the logits
    against the timm model, and save model + image processor.

    NOTE(review): the mangled signature declares ``a`` twice (a
    SyntaxError — originally the model name and the output folder) and all
    locals are collapsed to ``snake_case__`` while the body reads the
    intended names (``config``, ``deit_name``, ...). Kept verbatim.
    """
    snake_case__ = DeiTConfig()
    # all deit models have fine-tuned heads
    snake_case__ = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    snake_case__ = 1000
    snake_case__ = """huggingface/label-files"""
    snake_case__ = """imagenet-1k-id2label.json"""
    snake_case__ = json.load(open(hf_hub_download(a , a , repo_type="""dataset""" ) , """r""" ) )
    snake_case__ = {int(a ): v for k, v in idalabel.items()}
    snake_case__ = idalabel
    snake_case__ = {v: k for k, v in idalabel.items()}
    # patch size / image size are encoded in the checkpoint name suffix
    snake_case__ = int(deit_name[-6:-4] )
    snake_case__ = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("""tiny""" ):
        snake_case__ = 192
        snake_case__ = 768
        snake_case__ = 12
        snake_case__ = 3
    elif deit_name[9:].startswith("""small""" ):
        snake_case__ = 384
        snake_case__ = 1536
        snake_case__ = 12
        snake_case__ = 6
    if deit_name[9:].startswith("""base""" ):
        pass
    elif deit_name[4:].startswith("""large""" ):
        snake_case__ = 1024
        snake_case__ = 4096
        snake_case__ = 24
        snake_case__ = 16
    # load original model from timm
    snake_case__ = timm.create_model(a , pretrained=a )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    snake_case__ = timm_model.state_dict()
    snake_case__ = create_rename_keys(a , a )
    for src, dest in rename_keys:
        rename_key(a , a , a )
    read_in_q_k_v(a , a , a )
    # load HuggingFace model
    snake_case__ = DeiTForImageClassificationWithTeacher(a ).eval()
    model.load_state_dict(a )
    # Check outputs on an image, prepared by DeiTImageProcessor
    snake_case__ = int(
        (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    snake_case__ = DeiTImageProcessor(size=a , crop_size=config.image_size )
    snake_case__ = image_processor(images=prepare_img() , return_tensors="""pt""" )
    snake_case__ = encoding["""pixel_values"""]
    snake_case__ = model(a )
    snake_case__ = timm_model(a )
    # converted model must reproduce the timm logits closely
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(a , outputs.logits , atol=1e-3 )
    Path(a ).mkdir(exist_ok=a )
    print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(a )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(a )
if __name__ == "__main__":
    # NOTE(review): the automated rename assigned the parser to `a__`, so the
    # later references to `parser`, `args` and `convert_deit_checkpoint` are
    # undefined names — restore the original bindings before running.
    a__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--deit_name""",
        default="""vit_deit_base_distilled_patch16_224""",
        type=str,
        help="""Name of the DeiT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    a__ = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 654 | 1 |
from collections import defaultdict
from math import ceil, sqrt
def _UpperCAmelCase ( a : int = 100_0000 , a : int = 10 ):
snake_case__ = defaultdict(a )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
snake_case__ = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
snake_case__ = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(a , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined after the automated rename
    # (the function above became ``_UpperCAmelCase``) — running as a script
    # raises NameError.
    print(F'''{solution() = }''')
| 654 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( lowercase_ ):
    """Output wrapper for the prior transformer below: a single predicted
    embedding tensor (the field's original name was lost to the automated
    rename)."""

    _lowercase : torch.FloatTensor
class _lowerCAmelCase ( lowercase_ , lowercase_ ):
    """A prior transformer: a decoder-only transformer that, given a timestep,
    a projected conditioning embedding and optional encoder hidden states,
    predicts an image-space embedding (returned via
    ``PriorTransformerOutput(predicted_image_embedding=...)``).

    NOTE(review): this class has been damaged by automated renaming and will
    not run as written — confirm against the upstream source before use:
      * the bases ``lowercase_`` are undefined here (presumably
        ``ModelMixin, ConfigMixin``, both imported above — TODO confirm);
      * every method is named ``__magic_name__``, so later definitions
        overwrite earlier ones in the class namespace;
      * ``__init__`` and the other signatures repeat the parameter name
        ``UpperCamelCase__``, which is a duplicate-argument SyntaxError;
      * every local assignment targets the same name ``snake_case__``, so the
        submodules built in ``__init__`` are never stored on ``self`` even
        though later code reads attributes such as ``self.time_proj``.
    """
    @register_to_config
    # NOTE(review): all 15 parameters below share one name; judging by the
    # names read in the body, they were originally num_attention_heads,
    # attention_head_dim, num_layers, embedding_dim, num_embeddings,
    # additional_embeddings, dropout, time_embed_act_fn, norm_in_type,
    # embedding_proj_norm_type, encoder_hid_proj_type, added_emb_type,
    # time_embed_dim, embedding_proj_dim and clip_embed_dim — TODO confirm.
    def __init__( self : Tuple , UpperCamelCase__ : int = 3_2 , UpperCamelCase__ : int = 6_4 , UpperCamelCase__ : int = 2_0 , UpperCamelCase__ : int = 7_6_8 , UpperCamelCase__ : Optional[Any]=7_7 , UpperCamelCase__ : str=4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = "linear" , UpperCamelCase__ : Optional[str] = "prd" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , ):
        """Build the embedding projections, transformer stack, output
        projection and causal attention mask buffer.

        NOTE(review): each ``snake_case__ =`` below overwrote a distinct
        ``self.<attr>`` assignment in the original; the intended attribute can
        usually be inferred from where ``forward`` reads it.
        """
        super().__init__()
        # Derived dimensions (inner_dim = heads * head_dim; the `or` fallbacks
        # default the optional projection dims to the main embedding dim).
        snake_case__ = num_attention_heads
        snake_case__ = attention_head_dim
        snake_case__ = num_attention_heads * attention_head_dim
        snake_case__ = additional_embeddings
        snake_case__ = time_embed_dim or inner_dim
        snake_case__ = embedding_proj_dim or embedding_dim
        snake_case__ = clip_embed_dim or embedding_dim
        # Timestep featurization + MLP embedding (read later as self.time_proj
        # and self.time_embedding — TODO confirm).
        snake_case__ = Timesteps(UpperCamelCase__ , UpperCamelCase__ , 0)
        snake_case__ = TimestepEmbedding(UpperCamelCase__ , UpperCamelCase__ , out_dim=UpperCamelCase__ , act_fn=UpperCamelCase__)
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        # Optional LayerNorm applied to the conditioning embedding before
        # projection (self.embedding_proj_norm in forward).
        if embedding_proj_norm_type is None:
            snake_case__ = None
        elif embedding_proj_norm_type == "layer":
            snake_case__ = nn.LayerNorm(UpperCamelCase__)
        else:
            raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''')
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        # Optional projection for encoder hidden states
        # (self.encoder_hidden_states_proj in forward).
        if encoder_hid_proj_type is None:
            snake_case__ = None
        elif encoder_hid_proj_type == "linear":
            snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        else:
            raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''')
        # Learned positional embedding over the full token sequence
        # (conditioning tokens + additional embeddings).
        snake_case__ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase__))
        # Optional learned "prd" token appended at the end of the sequence;
        # when present, forward reads the prediction from the last position.
        if added_emb_type == "prd":
            snake_case__ = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase__))
        elif added_emb_type is None:
            snake_case__ = None
        else:
            raise ValueError(
                F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''')
        # The transformer stack itself (self.transformer_blocks in forward).
        snake_case__ = nn.ModuleList(
            [
                BasicTransformerBlock(
                    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn="""gelu""" , attention_bias=UpperCamelCase__ , )
                for d in range(UpperCamelCase__)
            ])
        # Optional input LayerNorm (self.norm_in in forward).
        if norm_in_type == "layer":
            snake_case__ = nn.LayerNorm(UpperCamelCase__)
        elif norm_in_type is None:
            snake_case__ = None
        else:
            raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''')
        # Output norm + projection to the predicted embedding
        # (self.norm_out / self.proj_to_clip_embeddings in forward).
        snake_case__ = nn.LayerNorm(UpperCamelCase__)
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        # Additive causal mask: strictly-upper-triangular entries set to a
        # large negative value so attention cannot look at future positions.
        snake_case__ = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0)
        causal_attention_mask.triu_(1)
        snake_case__ = causal_attention_mask[None, ...]
        self.register_buffer("""causal_attention_mask""" , UpperCamelCase__ , persistent=UpperCamelCase__)
        # Statistics used by the latent un-normalization in the last method
        # (read as self.clip_mean / self.clip_std — TODO confirm order).
        snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
        snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def __magic_name__ ( self : Optional[int]):
        """Return all attention processors used in the model, as a dict keyed
        by the processor's fully qualified module path (walks every submodule
        that exposes ``set_processor``)."""
        snake_case__ = {}
        # NOTE(review): the nested function's parameters are also all named
        # `UpperCamelCase__` (duplicate args); the body reads `module`,
        # `name` and `processors`, which were presumably the real names.
        def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor]):
            if hasattr(UpperCamelCase__ , """set_processor"""):
                snake_case__ = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        return processors
    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Set the attention processor(s) to use.

        Accepts either a single processor (applied to every attention layer)
        or a dict keyed by module path; a dict must contain exactly one entry
        per attention layer, otherwise ValueError is raised.
        """
        snake_case__ = len(self.attn_processors.keys())
        if isinstance(UpperCamelCase__ , UpperCamelCase__) and len(UpperCamelCase__) != count:
            raise ValueError(
                F'''A dict of processors was passed, but the number of processors {len(UpperCamelCase__)} does not match the'''
                F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''')
        def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Optional[int]):
            if hasattr(UpperCamelCase__ , """set_processor"""):
                # Single processor: set directly; dict: pop this layer's entry.
                if not isinstance(UpperCamelCase__ , UpperCamelCase__):
                    module.set_processor(UpperCamelCase__)
                else:
                    module.set_processor(processor.pop(F'''{name}.processor'''))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)
        for name, module in self.named_children():
            fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
    def __magic_name__ ( self : Dict):
        """Reset every attention layer to the default ``AttnProcessor``."""
        self.set_attn_processor(AttnProcessor())
    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[torch.Tensor, float, int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.BoolTensor] = None , UpperCamelCase__ : bool = True , ):
        """Forward pass.

        NOTE(review): parameters are all named ``UpperCamelCase__``; from the
        body they were presumably ``hidden_states`` (the noised embedding),
        ``timestep``, ``proj_embedding``, ``encoder_hidden_states``,
        ``attention_mask`` and ``return_dict`` — TODO confirm.  Assembles the
        token sequence [encoder states?, projected conditioning, time
        embedding, hidden states, prd token?], adds positional embeddings,
        runs the transformer blocks under a causal mask, and projects the
        final token(s) to the predicted image embedding.  Returns a
        ``PriorTransformerOutput`` (or a 1-tuple when ``return_dict`` is
        falsy).
        """
        snake_case__ = hidden_states.shape[0]
        snake_case__ = timestep
        # Normalize `timestep` to a 1-D tensor on the right device so the
        # batch broadcast below always works.
        if not torch.is_tensor(UpperCamelCase__):
            snake_case__ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device)
        elif torch.is_tensor(UpperCamelCase__) and len(timesteps.shape) == 0:
            snake_case__ = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        snake_case__ = timesteps * torch.ones(UpperCamelCase__ , dtype=timesteps.dtype , device=timesteps.device)
        snake_case__ = self.time_proj(UpperCamelCase__)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        snake_case__ = timesteps_projected.to(dtype=self.dtype)
        snake_case__ = self.time_embedding(UpperCamelCase__)
        # Optionally normalize, then project, the conditioning embedding.
        if self.embedding_proj_norm is not None:
            snake_case__ = self.embedding_proj_norm(UpperCamelCase__)
        snake_case__ = self.embedding_proj(UpperCamelCase__)
        # Project encoder hidden states if a projection was configured; the
        # projection without the states is a configuration error.
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            snake_case__ = self.encoder_hidden_states_proj(UpperCamelCase__)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""")
        snake_case__ = self.proj_in(UpperCamelCase__)
        snake_case__ = self.positional_embedding.to(hidden_states.dtype)
        # Build the token sequence, tracking how many leading tokens come
        # from the optional encoder hidden states.
        snake_case__ = []
        snake_case__ = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(UpperCamelCase__)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        # Promote 2-D (batch, dim) inputs to single-token (batch, 1, dim).
        if len(proj_embeddings.shape) == 2:
            snake_case__ = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            snake_case__ = hidden_states[:, None, :]
        snake_case__ = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        # Append the learned "prd" token, expanded across the batch.
        if self.prd_embedding is not None:
            snake_case__ = self.prd_embedding.to(hidden_states.dtype).expand(UpperCamelCase__ , -1 , -1)
            additional_embeds.append(UpperCamelCase__)
        snake_case__ = torch.cat(
            UpperCamelCase__ , dim=1 , )
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        snake_case__ = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            snake_case__ = F.pad(
                UpperCamelCase__ , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        snake_case__ = hidden_states + positional_embeddings
        if attention_mask is not None:
            # Convert the boolean mask to additive form (-10000 on masked
            # positions), pad it for the additional tokens, combine with the
            # causal mask and repeat per attention head.
            snake_case__ = (1 - attention_mask.to(hidden_states.dtype)) * -1_00_00.0
            snake_case__ = F.pad(UpperCamelCase__ , (0, self.additional_embeddings) , value=0.0)
            snake_case__ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            snake_case__ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0)
        if self.norm_in is not None:
            snake_case__ = self.norm_in(UpperCamelCase__)
        for block in self.transformer_blocks:
            snake_case__ = block(UpperCamelCase__ , attention_mask=UpperCamelCase__)
        snake_case__ = self.norm_out(UpperCamelCase__)
        # With a prd token the prediction is read from the last position;
        # otherwise from everything after the additional embeddings.
        if self.prd_embedding is not None:
            snake_case__ = hidden_states[:, -1]
        else:
            snake_case__ = hidden_states[:, additional_embeddings_len:]
        snake_case__ = self.proj_to_clip_embeddings(UpperCamelCase__)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase__)
    def __magic_name__ ( self : Any , UpperCamelCase__ : Any):
        """Un-normalize prior latents back to the embedding scale using the
        stored mean/std parameters (latents * clip_std + clip_mean).

        NOTE(review): the body reads `prior_latents`, which was presumably
        the parameter's original name — TODO confirm.
        """
        snake_case__ = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 654 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.