| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Dict:
super().__init__()
if safety_checker is None:
logger.warning(
F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''')
self.register_modules(
speech_model=lowerCamelCase_ , speech_processor=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , )
def UpperCAmelCase__ ( self , lowerCamelCase_ = "auto") -> str:
if slice_size == "auto":
UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Dict:
self.enable_attention_slicing(lowerCamelCase_)
@torch.no_grad()
def __call__( self , lowerCamelCase_ , lowerCamelCase_=1_6_0_0_0 , lowerCamelCase_ = 5_1_2 , lowerCamelCase_ = 5_1_2 , lowerCamelCase_ = 5_0 , lowerCamelCase_ = 7.5 , lowerCamelCase_ = None , lowerCamelCase_ = 1 , lowerCamelCase_ = 0.0 , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = "pil" , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = 1 , **lowerCamelCase_ , ) -> Any:
UpperCamelCase = self.speech_processor.feature_extractor(
lowerCamelCase_ , return_tensors='''pt''' , sampling_rate=lowerCamelCase_).input_features.to(self.device)
UpperCamelCase = self.speech_model.generate(lowerCamelCase_ , max_length=4_8_0_0_0_0)
UpperCamelCase = self.speech_processor.tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , normalize=lowerCamelCase_)[
0
]
if isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = 1
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase_)}')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.')
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase_ , lowerCamelCase_) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(lowerCamelCase_)}.')
# get prompt text embeddings
UpperCamelCase = self.tokenizer(
lowerCamelCase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F' {self.tokenizer.model_max_length} tokens: {removed_text}')
UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase , UpperCamelCase , UpperCamelCase = text_embeddings.shape
UpperCamelCase = text_embeddings.repeat(1 , lowerCamelCase_ , 1)
UpperCamelCase = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCamelCase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase = 42
if negative_prompt is None:
UpperCamelCase = [''''''] * batch_size
elif type(lowerCamelCase_) is not type(lowerCamelCase_):
raise TypeError(
F'`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase_)} !='
F' {type(lowerCamelCase_)}.')
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [negative_prompt]
elif batch_size != len(lowerCamelCase_):
raise ValueError(
F'`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase_)}, but `prompt`:'
F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
''' the batch size of `prompt`.''')
else:
UpperCamelCase = negative_prompt
UpperCamelCase = text_input_ids.shape[-1]
UpperCamelCase = self.tokenizer(
lowerCamelCase_ , padding='''max_length''' , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors='''pt''' , )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase = uncond_embeddings.shape[1]
UpperCamelCase = uncond_embeddings.repeat(1 , lowerCamelCase_ , 1)
UpperCamelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCamelCase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase = torch.randn(lowerCamelCase_ , generator=lowerCamelCase_ , device='''cpu''' , dtype=lowerCamelCase_).to(
self.device)
else:
UpperCamelCase = torch.randn(lowerCamelCase_ , generator=lowerCamelCase_ , device=self.device , dtype=lowerCamelCase_)
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
UpperCamelCase = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
UpperCamelCase = {}
if accepts_eta:
UpperCamelCase = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase_)):
# expand the latents if we are doing classifier free guidance
UpperCamelCase = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
UpperCamelCase = self.scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_)
# predict the noise residual
UpperCamelCase = self.unet(lowerCamelCase_ , lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = noise_pred.chunk(2)
UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = 1 / 0.1_8215 * latents
UpperCamelCase = self.vae.decode(lowerCamelCase_).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(lowerCamelCase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase_ , nsfw_content_detected=lowerCamelCase_)
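
# --- Usage sketch (not part of the original file) ---
# A minimal, hedged example of wiring the pipeline together. The checkpoint ids and the
# community-pipeline name are illustrative assumptions, not prescribed by the code above.
#
#   import torch
#   from diffusers import DiffusionPipeline
#   from transformers import WhisperForConditionalGeneration, WhisperProcessor
#
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
#   speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5",
#       custom_pipeline="speech_to_image_diffusion",  # assumed community pipeline id
#       speech_model=speech_model,
#       speech_processor=speech_processor,
#   ).to(device)
#   output = pipe(audio, sampling_rate=16_000)  # `audio`: a 1-D float waveform
#   output.images[0].save("speech_to_image.png")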
| 34 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_lowercase ,id=_lowercase )
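
# Usage note (assumption: invoked from the repository root where this conftest lives):
#   pytest tests/ --make-reports=my_run   # pytest_terminal_summary_main then writes report files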
| 34 | 1 |
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
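
# Note on the offline tests above: patching `datasets.config.HF_DATASETS_OFFLINE` to True makes
# the http/ftp/fsspec helpers raise `OfflineModeIsEnabled` before any network access is attempted.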
| 467 |
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
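    # illustrative check (inputs are hypothetical): the classic pair differs in 3 positions
    print(hamming_distance("karolin", "kathrin"))  # -> 3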
| 467 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader to use during the test cases."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """Verify the batch sizes coming from a prepared dataloader in each process."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
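
# Launch note (assumption: a machine with 2 GPUs, since `create_accelerator` asserts
# `num_processes == 2`; the script filename is illustrative):
#   accelerate launch --num_processes 2 test_even_batches.py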
| 6 |
"""simple docstring"""
from __future__ import annotations
def A ( snake_case__ ):
'''simple docstring'''
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(snake_case__ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(snake_case__ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
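    # illustrative check (hypothetical grid): the cheapest path 1 -> 3 -> 1 -> 1 -> 1 costs 7
    print(minimum_cost_path([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # -> 7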
| 196 | 0 |
"""simple docstring"""
class lowerCamelCase__ :
def __init__( self ):
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = {}
def _UpperCamelCase ( self ,A ):
if vertex not in self.adjacency:
UpperCAmelCase = {}
self.num_vertices += 1
def _UpperCamelCase ( self ,A ,A ,A ):
self.add_vertex(A )
self.add_vertex(A )
if head == tail:
return
UpperCAmelCase = weight
UpperCAmelCase = weight
def _UpperCamelCase ( self ):
UpperCAmelCase = self.get_edges()
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = edge
edges.remove((tail, head, weight) )
for i in range(len(A ) ):
UpperCAmelCase = list(edges[i] )
edges.sort(key=lambda A : e[2] )
for i in range(len(A ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
UpperCAmelCase = edges[i][2] + 1
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = edge
UpperCAmelCase = weight
UpperCAmelCase = weight
def __str__( self ):
UpperCAmelCase = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
UpperCAmelCase = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("""\n""" )
def _UpperCamelCase ( self ):
UpperCAmelCase = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def _UpperCamelCase ( self ):
return self.adjacency.keys()
@staticmethod
def _UpperCamelCase ( A=None ,A=None ):
UpperCAmelCase = Graph()
if vertices is None:
UpperCAmelCase = []
if edges is None:
UpperCAmelCase = []
for vertex in vertices:
g.add_vertex(A )
for edge in edges:
g.add_edge(*A )
return g
class lowerCamelCase__ :
def __init__( self ):
UpperCAmelCase = {}
UpperCAmelCase = {}
def __len__( self ):
return len(self.parent )
def _UpperCamelCase ( self ,A ):
if item in self.parent:
return self.find(A )
UpperCAmelCase = item
UpperCAmelCase = 0
return item
def _UpperCamelCase ( self ,A ):
if item not in self.parent:
return self.make_set(A )
if item != self.parent[item]:
UpperCAmelCase = self.find(self.parent[item] )
return self.parent[item]
def _UpperCamelCase ( self ,A ,A ):
UpperCAmelCase = self.find(A )
UpperCAmelCase = self.find(A )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
UpperCAmelCase = roota
return roota
if self.rank[roota] < self.rank[roota]:
UpperCAmelCase = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
UpperCAmelCase = roota
return roota
return None
@staticmethod
def _UpperCamelCase ( A ):
UpperCAmelCase = graph.num_vertices
UpperCAmelCase = Graph.UnionFind()
UpperCAmelCase = []
while num_components > 1:
UpperCAmelCase = {}
for vertex in graph.get_vertices():
UpperCAmelCase = -1
UpperCAmelCase = graph.get_edges()
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = edge
edges.remove((tail, head, weight) )
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = edge
UpperCAmelCase = union_find.find(A )
UpperCAmelCase = union_find.find(A )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
UpperCAmelCase = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
UpperCAmelCase = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = cheap_edge[vertex]
if union_find.find(A ) != union_find.find(A ):
union_find.union(A ,A )
mst_edges.append(cheap_edge[vertex] )
UpperCAmelCase = num_components - 1
UpperCAmelCase = Graph.build(edges=A )
return mst
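
# Usage sketch (hypothetical graph, not part of the original file; weights kept distinct
# because Borůvka's algorithm above assumes distinct edge weights):
#   g = Graph.build(vertices=[0, 1, 2, 3], edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (1, 3, 4)])
#   mst = Graph.boruvka_mst(g)
#   print(mst)  # prints the edges of the minimum spanning tree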
| 701 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class lowerCamelCase__ :
def __init__( self ,A = 6 ):
UpperCAmelCase = None
UpperCAmelCase = None
self.create_linked_list(A )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = Node()
UpperCAmelCase = current_node
UpperCAmelCase = current_node
UpperCAmelCase = current_node
for _ in range(1 ,A ):
UpperCAmelCase = Node()
UpperCAmelCase = current_node
UpperCAmelCase = previous_node
UpperCAmelCase = current_node
UpperCAmelCase = self.front
UpperCAmelCase = previous_node
def _UpperCamelCase ( self ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _UpperCamelCase ( self ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def _UpperCamelCase ( self ,A ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
UpperCAmelCase = self.rear.next
if self.rear:
UpperCAmelCase = data
def _UpperCamelCase ( self ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
UpperCAmelCase = self.front.data
UpperCAmelCase = None
return data
UpperCAmelCase = self.front
UpperCAmelCase = old_front.next
UpperCAmelCase = old_front.data
UpperCAmelCase = None
return data
def _UpperCamelCase ( self ):
if self.is_empty():
raise Exception("""Empty Queue""" )
def _UpperCamelCase ( self ):
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class lowerCamelCase__ :
def __init__( self ):
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if __name__ == "__main__":
import doctest
doctest.testmod()
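    # illustrative run (hypothetical values): FIFO behaviour over the fixed-size ring
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    print(queue.dequeue())  # -> a
    print(queue.dequeue())  # -> b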
| 74 | 0 |
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 421 |
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = "    def __init__(self, config):\n        super().__init__()\n        self.transform = BertPredictionHeadTransform(config)\n\n        # The output weights are the same as the input embeddings, but there is\n        # an output-only bias for each token.\n        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n        self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n        self.decoder.bias = self.bias\n\n    def forward(self, hidden_states):\n        hidden_states = self.transform(hidden_states)\n        hidden_states = self.decoder(hidden_states)\n        return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list_sample, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
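
# Context note: `check_copies.is_copy_consistent` verifies that code marked "# Copied from ..."
# still matches its source (optionally rewriting it in place when `overwrite=True`), and
# `convert_to_localized_md` keeps the translated README model lists in sync with README.md.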
| 270 | 0 |
def nand_gate(input_1: int, input_2: int) -> int:
    """Return the NAND of two binary inputs: 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
| 715 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
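
# Background note: PNDM sampling runs a Runge-Kutta warm-up phase (`step_prk`) to build up the
# history of model outputs (`scheduler.ets`) that the linear multistep phase (`step_plms`)
# then consumes, which is why the tests above seed `ets` with dummy past residuals.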
| 125 | 0 |
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Evaluate the Hubble parameter H(z) from the Friedmann equation."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
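
# The function evaluates the Friedmann equation
#   H(z) = H0 * sqrt(Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_Lambda),
# with curvature Omega_k = 1 - (Omega_m + Omega_r + Omega_Lambda). The demo above uses a
# nearly flat LCDM universe at redshift z = 0, where E(0) ≈ 1, so it prints ≈ 68.3.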
| 57 |
"""Image processor for a ViT-style model (the original class name was anonymized in the source)."""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : Dict = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : int , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = do_rescale
_a = do_normalize
_a = do_center_crop
_a = crop_size
_a = size
_a = resample
_a = rescale_factor
_a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "shortest_edge" in size:
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_a = (size['''height'''], size['''width'''])
else:
raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
    def preprocess(
        self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None,
        resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs,
    ) -> BatchFeature:
        """Preprocess an image or a batch of images into model-ready pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
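# Usage sketch (not part of the original file). The enclosing class name is not
# visible in this fragment, so ViT's processor is used as a stand-in: any
# transformers image processor exposing this `preprocess` interface behaves alike.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
    image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
    batch = processor.preprocess(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with 224x224 defaults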
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
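# Sketch (not in the original file) of calling the wrapped scorer directly; the
# keyword arguments mirror the _compute method above, and compute_bleu returns
# the same 6-tuple that _compute unpacks.
if __name__ == "__main__":
    preds = [["hello", "there", "general", "kenobi"]]
    refs = [[["hello", "there", "general", "kenobi"]]]
    bleu, precisions, bp, ratio, translation_length, reference_length = compute_bleu(
        reference_corpus=refs, translation_corpus=preds, max_order=4, smooth=False
    )
    print(bleu)  # 1.0 for an exact match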
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64,
                 n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0,
                 layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256,
                 eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default",
                 patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
                              is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
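# Sketch (not in the original file): building dummy ONNX-export inputs for a
# small GPT-J config; "gpt2" is used only as a stand-in tokenizer.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    config = GPTJConfig(n_layer=2, n_head=4, n_embd=64, rotary_dim=16)
    onnx_config = GPTJOnnxConfig(config, use_past=True)
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
    print(list(dummy.keys()))  # ['input_ids', 'past_key_values', 'attention_mask']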
def factorial(num: int) -> int:
    """Return num!."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of `number`."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # remove the last digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler style)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
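
# Quick sanity check (not in the original): 10! = 3628800 and 3+6+2+8+8+0+0 = 27.
assert solution(10) == 27
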
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
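# Sketch (not in the original file): the dynamic-axes mapping an ONNX exporter
# would consume for this architecture.
if __name__ == "__main__":
    onnx_config = XLMRobertaOnnxConfig(XLMRobertaConfig(), task="sequence-classification")
    print(dict(onnx_config.inputs))
    # {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {0: 'batch', 1: 'sequence'}}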
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
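# Stand-alone sketch (not in the original file) of what the slow test above
# exercises; guarded behind __main__ since it downloads the DPT checkpoint.
if __name__ == "__main__":
    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    result["depth"].save("depth.png")  # PIL image of the predicted depth map
    print(result["predicted_depth"].shape)  # raw torch.Tensor of depths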
"""simple docstring"""
__lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : Tuple = True
lowerCamelCase : Any = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
order.append(UpperCamelCase__ )
return order
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : List[Any] = True
lowerCamelCase : int = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return component
def snake_case ( UpperCamelCase__ : dict[int, list[int]] ) -> list[list[int]]:
lowerCamelCase : int = len(UpperCamelCase__ ) * [False]
lowerCamelCase : dict[int, list[int]] = {vert: [] for vert in range(len(UpperCamelCase__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(UpperCamelCase__ )
lowerCamelCase : int = []
for i, was_visited in enumerate(UpperCamelCase__ ):
if not was_visited:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Tuple = []
lowerCamelCase : str = len(UpperCamelCase__ ) * [False]
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : Any = order[len(UpperCamelCase__ ) - i - 1]
if not visited[vert]:
lowerCamelCase : List[str] = find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
components_list.append(UpperCamelCase__ )
return components_list
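
if __name__ == "__main__":
    # Expected output for the two module-level test graphs:
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]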
def selection_sort(collection: list) -> list:
    """In-place selection sort; returns the sorted collection."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
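
# Quick check (not in the original) on a tiny list:
assert selection_sort([3, 1, 2]) == [1, 2, 3]
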
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64),
            class_embed_type="simple_projection", projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True, return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(
            prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
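# Stand-alone sketch (not in the original file) of plain AudioLDM inference with
# the checkpoint used by the slow tests; guarded behind __main__ since it
# downloads the full pipeline.
if __name__ == "__main__":
    import scipy.io.wavfile

    pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
    waveform = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]
    scipy.io.wavfile.write("hammer.wav", rate=16000, data=waveform)  # 16 kHz, matching the vocoder config above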
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
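# Quick interactive sketch (not in the original file) of the API under test;
# needs network access, hence guarded behind __main__. The function is already
# imported at the top of this module.
if __name__ == "__main__":
    print(get_dataset_split_names("squad", config_name="plain_text"))  # ['train', 'validation']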
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12,
                 num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False,
                 special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """Wraps a DeepSpeed config given as a dict, a path to a JSON file, or a base64-encoded JSON string."""

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config
        self.set_stage_and_offload()
    def set_stage_and_offload(self):
        # zero stage
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)
    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    """Thin wrapper that routes `backward` (and the implied `step`) through the DeepSpeed engine."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped (e.g. because of gradient overflow)."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim:
    """Placeholder optimizer for the case where the real one comes from the DeepSpeed config."""

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Placeholder scheduler for the case where the real one comes from the DeepSpeed config."""

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
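# Minimal sketch (not in the original file) exercising the dotted-key lookup above:
if __name__ == "__main__":
    ds_config = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
    print(ds_config.get_value("zero_optimization.stage"))  # 3
    print(ds_config.is_zero3(), ds_config.is_offload())  # True True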
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in the model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in the model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Force the weight/output scale factors of Q, K and V to match by taking their max,
    as needed when the QKV projections are fused into a single GEMM."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip the amax of GELU output quantizers to maxval."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand a per-tensor amax to per-channel, assigning the same value to every channel."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Recalibrate weight amaxes by taking the max of the weights."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print one line per layer carrying a weight, listing its input and weight quantizer configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print a summary of every TensorQuantizer module found in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute k to v on mod's named quantizer, warning if the quantizer is missing."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def set_quantizers(name, mod, which="both", **kwargs):
    """Set attributes on the input and/or weight quantizers of mod."""
    s = f"Warning: changing {which} quantizers of {name}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for every module whose name matches one of the given regexes."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
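
# Usage sketch (added for illustration; the `_disabled` attribute is how the
# script's own CLI flags drive these helpers, but the "pooler" regex and the
# model object here are hypothetical):
def _example_quant_workflow(model):
    # Disable quantization on every quantizer of layers matching the regex.
    set_quantizer_by_name(model, ["pooler"], _disabled=True)
    # Then list every TensorQuantizer to verify the change took effect.
    print_quant_summary(model)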
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    """Convert bytes to megabytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    """Context manager tracking the CUDA memory allocated and peaked inside the block."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
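
# Usage sketch (added for illustration; needs a CUDA device): allocate a
# tensor inside the tracker, then read back the MB deltas computed in __exit__.
def _demo_tracemalloc():
    with TorchTracemalloc() as tm:
        scratch = torch.empty(1024, 1024, device="cuda")  # ~4 MB of float32
        del scratch
    print(f"delta used/peak {tm.used}/{tm.peaked} MB")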
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    """Create train/eval dataloaders over a small slice of GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(tracemalloc.peaked + b2mb(tracemalloc.begin))
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument("--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument("--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self) -> str:
_lowerCamelCase : str = inspect.getfile(accelerate.test_utils)
_lowerCamelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["""scripts""", """test_script.py"""])
_lowerCamelCase : Dict = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["""scripts""", """test_distributed_data_loop.py"""])
_lowerCamelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["""scripts""", """test_ops.py"""])
@require_multi_gpu
def UpperCamelCase_ ( self) -> Union[str, Any]:
print(F'Found {torch.cuda.device_count()} devices.')
_lowerCamelCase : List[str] = ["""torchrun""", F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(a_ , env=os.environ.copy())
@require_multi_gpu
def UpperCamelCase_ ( self) -> List[str]:
print(F'Found {torch.cuda.device_count()} devices.')
_lowerCamelCase : Optional[Any] = ["""torchrun""", F'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
print(F'Command: {cmd}')
with patch_environment(omp_num_threads=1):
execute_subprocess_async(a_ , env=os.environ.copy())
@require_multi_gpu
def UpperCamelCase_ ( self) -> Union[str, Any]:
_lowerCamelCase : Any = ["""torchrun""", F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(a_ , env=os.environ.copy())
@require_multi_gpu
def UpperCamelCase_ ( self) -> List[str]:
print(F'Found {torch.cuda.device_count()} devices, using 2 devices only')
_lowerCamelCase : Optional[int] = ["""torchrun""", F'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1"""):
execute_subprocess_async(a_ , env=os.environ.copy())
if __name__ == "__main__":
UpperCAmelCase = Accelerator()
UpperCAmelCase = (accelerator.state.process_index + 2, 10)
UpperCAmelCase = torch.randint(0, 10, shape).to(accelerator.device)
UpperCAmelCase = """"""
UpperCAmelCase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCAmelCase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCAmelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
_snake_case : int = str(snake_case__ )
return len(snake_case__ ) == 9 and set(snake_case__ ) == set("""123456789""" )
def UpperCAmelCase__ ():
"""simple docstring"""
for base_num in range(99_99 , 49_99 , -1 ):
_snake_case : List[Any] = 10_00_02 * base_num
if is_9_pandigital(snake_case__ ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
_snake_case : List[str] = 1_00_20_03 * base_num
if is_9_pandigital(snake_case__ ):
return candidate
return None
if __name__ == "__main__":
print(F'''{solution() = }''')
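
# Why the two multipliers work (sketch of the arithmetic, added for clarity):
# for a 4-digit b, 2*b has five digits, so 100002*b == b*10**5 + 2*b is the
# digit concatenation of b and 2b (4 + 5 = 9 digits). Likewise, for a 3-digit
# b, 1002003*b == b*10**6 + 2*b*10**3 + 3*b concatenates b, 2b and 3b.
assert 100002 * 9999 == int(str(9999) + str(2 * 9999))
assert 1002003 * 333 == int(str(333) + str(2 * 333) + str(3 * 333))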
def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively move a tower of the given height from one pole to another."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
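
# Sanity-check sketch (added, not in the original): a tower of height h takes
# exactly 2**h - 1 disk moves; count the moves instead of printing them.
def count_moves(height: int) -> int:
    if height < 1:
        return 0
    return 2 * count_moves(height - 1) + 1


assert count_moves(4) == 2**4 - 1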
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenate all conversation turns, separated by EOS, truncating to the model max length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
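
# Usage sketch (added for illustration; loading pulls the real tokenizer files
# from the Hugging Face Hub, so this helper is not invoked by default):
def _demo_tokenizer():
    tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
    return tokenizer("Hello world").input_ids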
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'

    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
import string


def atbash_slow(sequence: str) -> str:
    """Encode via explicit ordinal arithmetic."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """Encode via a reversed-alphabet lookup table."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Compare the runtime of both implementations."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
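    # Property-check sketch (added, not in the original): Atbash is an
    # involution, so encoding twice returns the input unchanged, and both
    # implementations must agree on every example.
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        assert atbash(atbash(example)) == example
        assert atbash_slow(example) == atbash(example)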
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int):
    """Return ((n, e), (n, d)): an RSA public/private key pair of the given bit size."""
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
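
# Round-trip sketch (added for illustration, not called by the script): with
# the key pair above, textbook RSA encryption/decryption is modular
# exponentiation, which Python's built-in pow() performs efficiently.
def _demo_roundtrip(key_size: int = 1024) -> None:
    (n, e), (_, d) = generate_key(key_size)
    message = 42  # any integer 0 <= message < n
    ciphertext = pow(message, e, n)
    assert pow(ciphertext, d, n) == message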
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import sys
from typing import Tuple

import numpy as np
import torch
from PIL import Image
from torch import nn

from transformers.image_utils import PILImageResampling
from utils import img_tensorize


class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """Resize images so the shorter edge lands in `short_edge_length`, capped at `max_size`."""
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
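
# Usage sketch (added for illustration): rescale xyxy boxes back to the
# original image resolution, then clamp them inside a hypothetical 100x100 frame.
def _demo_boxes():
    boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])
    scale_yx = torch.tensor([[0.5, 0.5]])  # (scale_y, scale_x) per image
    _scale_box(boxes, scale_yx)  # in-place: coordinates are halved
    _clip_box(boxes, (100, 100))  # in-place: clamp into (h=100, w=100)
    return boxes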
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge label to the child node
        self.nodes = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str):
        """Return (common substring, remaining prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many(self, words) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True
    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ : Dict = f"""{sampling_rate}"""
lowercase__ : Optional[Any] = '''1'''
lowercase__ : Union[str, Any] = '''f32le'''
lowercase__ : Any = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(_lowerCAmelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
lowercase__ : Optional[int] = ffmpeg_process.communicate(_lowerCAmelCase )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
lowercase__ : str = output_stream[0]
lowercase__ : str = np.frombuffer(_lowerCAmelCase , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Stream raw microphone audio through ffmpeg, yielding chunks of bytes."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio with striding, yielding dicts ready for an ASR pipeline."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk raw bytes from an iterator into chunks of `chunk_len` with left/right strides.

    If `stream` is True, partial (still-growing) chunks are also yielded with `"partial": True`.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal: read from an ffmpeg subprocess in fixed-size chunks."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
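
# Usage sketch (added for illustration): chunk_bytes_iter works on any byte
# iterator, not just the ffmpeg stream. Splitting 10 bytes into chunks of 6
# with a right stride of 2 yields two full chunks plus a 2-byte tail.
def _demo_chunking():
    payload = iter([bytes(range(10))])
    for item in chunk_bytes_iter(payload, chunk_len=6, stride=(0, 2)):
        print(len(item["raw"]), item["stride"])  # 6 (0, 2) / 6 (0, 2) / 2 (0, 0)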
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]:
lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0}
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : str = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : str = do_center_crop
lowercase__ : List[Any] = crop_size
lowercase__ : Union[str, Any] = do_flip_channel_order
def _UpperCAmelCase ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
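# Hypothetical usage sketch for the feature extractor above (the waveform is a
# made-up dummy; any 16 kHz mono float array works with the defaults):
#
#   import numpy as np
#   extractor = Speech2TextFeatureExtractor()
#   waveform = np.zeros(16000, dtype=np.float32)  # one second of silence
#   batch = extractor(waveform, sampling_rate=16000, padding=True, return_tensors="pt")
#   # batch["input_features"] has shape (1, num_frames, num_mel_bins=80)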
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
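# Hypothetical usage sketch for the tokenizer above (the vocab file path is a
# made-up placeholder). Note the fairseq alignment: sentencepiece piece ids are
# shifted up by `fairseq_offset` (1), and encoding prepends the sep token:
#
#   tokenizer = XGLMTokenizer("sentencepiece.bpe.model")
#   ids = tokenizer("Hello world")["input_ids"]
#   assert ids[0] == tokenizer.sep_token_id  # "</s>" == id 2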
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
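# Hypothetical usage sketch (the repo and file names are made up): this resolves
# a dataset file to its Hub URL, url-encoding the path only on old
# huggingface_hub releases that did not do it themselves.
#
#   url = hf_hub_url("user/my-dataset", "data/train.json", revision="main")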
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
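# Hypothetical usage sketch for the tool above (writing the waveform out with
# soundfile is an assumption, not part of this file; SpeechT5 generates 16 kHz
# audio):
#
#   import soundfile as sf
#   tool = TextToSpeechTool()
#   tool.setup()
#   speech = tool("Hello, world!")
#   sf.write("speech.wav", speech.numpy(), samplerate=16000)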
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Wraps a scheduler's lr lambdas in picklable callable objects."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
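# Sanity-check sketch for the hard-coded expectations above: with 2 warmup steps
# and 10 training steps, the linear schedule ramps 0 -> 10.0 across warmup and
# then decays linearly, reproducing the get_linear_schedule_with_warmup row.
#
#   optimizer = AdamW(nn.Linear(50, 50).parameters(), lr=10.0)
#   scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
#   print(unwrap_schedule(scheduler, 10))
#   # -> [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]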
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
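# The rule these tests pin down, in one line: a repo is safetensors-compatible
# only if every "*.bin" weight file has a ".safetensors" sibling with the same
# (optionally variant-suffixed) stem. A minimal negative example:
#
#   assert not is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])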
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
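# Hypothetical usage sketch: the defaults above reproduce the google/canine-s
# configuration, so a config can be built with no arguments at all.
#
#   config = CanineConfig()
#   assert config.downsampling_rate == 4 and config.num_hash_buckets == 16384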
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_inpaint_legacy(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Return all primes below `limit` via the sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below `ceiling` that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest
if __name__ == "__main__":
print(f"{solution() = }")
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
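# Hypothetical usage sketch for the pipeline above ("CompVis/ldm-super-resolution-4x-openimages"
# is the public LDM super-resolution checkpoint; substitute your own if needed):
#
#   import PIL.Image
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res = PIL.Image.open("low_res.png").convert("RGB")
#   upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]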
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
    def test_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
import sys
import turtle
def get_mid(p1, p2):
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth):
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
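# Example invocation (depth 4 already gives a recognizable Sierpinski gasket;
# the recursion draws on the order of 3**depth small triangles, so high depths
# get slow):
#
#   python fractals.py 4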
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
'num_inference_steps',
'num_waveforms_per_prompt',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechT5HifiGan(vocoder_config)
        components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechTaHifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
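    # Note (added for clarity): seeding both the torch.Generator and the NumPy RNG, and
    # passing pre-computed latents, keeps the slow tests reproducible across runs and devices.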
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 278
| 1
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
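# Illustrative note (not part of the original script): make_linear_from_emb ties the
# LM head to the decoder's input embeddings by reusing the same weight tensor, so no
# new parameters are introduced:
#
#     emb = nn.Embedding(10, 4)
#     lm_head = make_linear_from_emb(emb)
#     assert lm_head.weight.data_ptr() == emb.weight.data_ptr()  # shared storage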
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = SpeechaTextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
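# Example invocation (hypothetical script name and paths, for illustration only):
#   python convert_s2t_fairseq_to_tfms.py \
#       --fairseq_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./s2t-converted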
| 110
|
"""simple docstring"""
import random
def rabin_miller(num):
    # write num - 1 as s * 2**t with s odd
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
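# Hedged sanity check (illustrative helper, not part of the original module): with 5
# random witnesses, a composite number passes each round with probability at most 1/4,
# so a wrong "True" answer occurs with probability at most (1/4) ** 5.
def _rabin_miller_demo():
    assert rabin_miller(97)      # 97 is prime
    assert not rabin_miller(91)  # 91 = 7 * 13; rejected with overwhelming probability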
def is_prime_low_num(num):
    if num < 2:
        return False

    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
        151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
        233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313,
        317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
        419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499,
        503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601,
        607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691,
        701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809,
        811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907,
        911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]

    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False

    return rabin_miller(num)
def generate_large_prime(keysize=1024):
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
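# Illustrative follow-up (assumed RSA-style use, not part of the original file):
#
#     p = generate_large_prime(1024)
#     q = generate_large_prime(1024)
#     n = p * q  # roughly 2048-bit modulus built from two independent primes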
| 110
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Union[str, Any]:
A : Dict =tempfile.mkdtemp()
A : int =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
A : Optional[int] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : str =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Union[str, Any] =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
A : Optional[Any] =self.get_image_processor()
A : Optional[Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Dict =self.prepare_image_inputs()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : Optional[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Any:
A : str =self.get_image_processor()
A : Union[str, Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : str =[torch.ones((1, 3, 5, 5) )]
A : Optional[Any] =[[17_64, 26_46]]
A : List[Any] =[[6_83, 10_24]]
A : Union[str, Any] =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , torch.tensor(SCREAMING_SNAKE_CASE__ ) , torch.tensor(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : str =[np.ones((1, 3, 5, 5) )]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
A : Any =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
A : Tuple =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : str ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple:
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[str]:
A : Optional[Any] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Optional[Any] =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Dict =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
A : Any =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : Tuple =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : List[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
A : int =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =[tf.ones((1, 3, 5, 5) )]
A : Tuple =[[17_64, 26_46]]
A : Union[str, Any] =[[6_83, 10_24]]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : List[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : Any =[np.ones((1, 3, 5, 5) )]
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
A : List[str] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
A : Optional[int] =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Dict =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[str]:
A : Optional[Any] =self.get_image_processor()
A : Dict =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
A : Optional[int] =[tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )]
A : Union[str, Any] =[torch.tensor(SCREAMING_SNAKE_CASE__ )]
A : int =[[17_64, 26_46]]
A : int =[[6_83, 10_24]]
A : Dict =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Any:
A : Union[str, Any] =self.get_image_processor()
A : int =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : List[Any] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Tuple =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
A : Dict =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
| 661
|
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        # Finds and returns the handler for the pressed character, if one is registered
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
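# Usage sketch (assumption-based example of how the helpers above compose): decorate
# methods with key codes via mark/mark_multiple, then wrap the class with register so
# that handle_input() dispatches on the pressed key.
#
#     @register
#     class Menu:
#         @mark(ord("q"))
#         def quit(cls):
#             return "quit"
#
#     Menu().handle_input()  # returns "quit" when the user presses "q"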
| 661
| 1
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
lowercase__ = '''\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n'''
lowercase__ = '''\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'''
lowercase__ = '''\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
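    # Usage sketch (mirrors the docstring examples above; illustrative only):
    #
    #     chrf = datasets.load_metric("chrf")
    #     result = chrf.compute(predictions=["hello there"], references=[["hello there"]])
    #     result["score"]  # 100.0 for an exact character-level match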
| 508
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
_UpperCamelCase = ["""image_processor""", """tokenizer"""]
_UpperCamelCase = """BlipImageProcessor"""
_UpperCamelCase = """AutoTokenizer"""
    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
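# Round-trip sketch (illustrative; assumes this processor is transformers'
# InstructBlipProcessor and that the checkpoint name below exists):
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     processor.save_pretrained("./my-processor")   # also writes the qformer_tokenizer/ subfolder
#     reloaded = InstructBlipProcessor.from_pretrained("./my-processor")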
| 366
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
def make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
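# Shape-normalization summary for make_batched (descriptive note):
#   a single frame              -> [[frame]]           one video containing one frame
#   a list of frames            -> [list_of_frames]    one video
#   a list of lists of frames   -> returned unchanged  already a batch of videos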
class a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""pixel_values"""]
def __init__( self : Dict , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = PILImageResampling.BILINEAR , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : bool = True , snake_case_ : Union[int, float] = 1 / 2_5_5 , snake_case_ : bool = True , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , **snake_case_ : Tuple , ):
'''simple docstring'''
super().__init__(**snake_case_ )
snake_case__ : List[str] = size if size is not None else {'''shortest_edge''': 2_2_4}
snake_case__ : List[str] = get_size_dict(snake_case_ , default_to_square=snake_case_ )
snake_case__ : List[str] = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
snake_case__ : int = get_size_dict(snake_case_ , param_name='''crop_size''' )
snake_case__ : Optional[int] = do_resize
snake_case__ : Dict = size
snake_case__ : str = do_center_crop
snake_case__ : str = crop_size
snake_case__ : str = resample
snake_case__ : Tuple = do_rescale
snake_case__ : Optional[int] = rescale_factor
snake_case__ : Optional[int] = do_normalize
snake_case__ : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __magic_name__ ( self : Dict , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : PILImageResampling = PILImageResampling.BILINEAR , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : List[str] , ):
'''simple docstring'''
snake_case__ : Optional[int] = get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" in size:
snake_case__ : Tuple = get_resize_output_image_size(snake_case_ , size['''shortest_edge'''] , default_to_square=snake_case_ )
elif "height" in size and "width" in size:
snake_case__ : int = (size['''height'''], size['''width'''])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def __magic_name__ ( self : Dict , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Dict , ):
'''simple docstring'''
snake_case__ : List[str] = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(snake_case_ , size=(size['''height'''], size['''width''']) , data_format=snake_case_ , **snake_case_ )
def __magic_name__ ( self : str , snake_case_ : np.ndarray , snake_case_ : Union[int, float] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Optional[int] , ):
'''simple docstring'''
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def __magic_name__ ( self : Optional[int] , snake_case_ : np.ndarray , snake_case_ : Union[float, List[float]] , snake_case_ : Union[float, List[float]] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Any , ):
'''simple docstring'''
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def __magic_name__ ( self : List[Any] , snake_case_ : ImageInput , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = None , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : bool = None , snake_case_ : float = None , snake_case_ : bool = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
snake_case__ : str = to_numpy_array(snake_case_ )
if do_resize:
snake_case__ : str = self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ )
if do_center_crop:
snake_case__ : Tuple = self.center_crop(snake_case_ , size=snake_case_ )
if do_rescale:
snake_case__ : Optional[int] = self.rescale(image=snake_case_ , scale=snake_case_ )
if do_normalize:
snake_case__ : Optional[Any] = self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ )
snake_case__ : Union[str, Any] = to_channel_dimension_format(snake_case_ , snake_case_ )
return image
def __magic_name__ ( self : str , snake_case_ : ImageInput , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = None , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : bool = None , snake_case_ : float = None , snake_case_ : bool = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : ChannelDimension = ChannelDimension.FIRST , **snake_case_ : List[Any] , ):
'''simple docstring'''
snake_case__ : Tuple = do_resize if do_resize is not None else self.do_resize
snake_case__ : Union[str, Any] = resample if resample is not None else self.resample
snake_case__ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case__ : Any = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ : List[Any] = image_mean if image_mean is not None else self.image_mean
snake_case__ : Any = image_std if image_std is not None else self.image_std
snake_case__ : str = size if size is not None else self.size
snake_case__ : List[Any] = get_size_dict(snake_case_ , default_to_square=snake_case_ )
snake_case__ : List[Any] = crop_size if crop_size is not None else self.crop_size
snake_case__ : str = get_size_dict(snake_case_ , param_name='''crop_size''' )
if not valid_images(snake_case_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
snake_case__ : Optional[Any] = make_batched(snake_case_ )
snake_case__ : Dict = [
[
self._preprocess_image(
image=snake_case_ , do_resize=snake_case_ , size=snake_case_ , resample=snake_case_ , do_center_crop=snake_case_ , crop_size=snake_case_ , do_rescale=snake_case_ , rescale_factor=snake_case_ , do_normalize=snake_case_ , image_mean=snake_case_ , image_std=snake_case_ , data_format=snake_case_ , )
for img in video
]
for video in videos
]
snake_case__ : Any = {'''pixel_values''': videos}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
| 701
|
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 502
| 0
|
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase_:
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=13 ,__UpperCAmelCase=32 ,__UpperCAmelCase=2 ,__UpperCAmelCase=3 ,__UpperCAmelCase=16 ,__UpperCAmelCase=[1, 2, 1] ,__UpperCAmelCase=[2, 2, 4] ,__UpperCAmelCase=2 ,__UpperCAmelCase=2.0 ,__UpperCAmelCase=True ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=False ,__UpperCAmelCase=True ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1E-5 ,__UpperCAmelCase=True ,__UpperCAmelCase=None ,__UpperCAmelCase=True ,__UpperCAmelCase=10 ,__UpperCAmelCase=8 ,) -> Dict:
lowerCAmelCase__ : List[Any] = parent
lowerCAmelCase__ : Any = batch_size
lowerCAmelCase__ : int = image_size
lowerCAmelCase__ : Optional[Any] = patch_size
lowerCAmelCase__ : Dict = num_channels
lowerCAmelCase__ : str = embed_dim
lowerCAmelCase__ : Tuple = depths
lowerCAmelCase__ : str = num_heads
lowerCAmelCase__ : Union[str, Any] = window_size
lowerCAmelCase__ : List[str] = mlp_ratio
lowerCAmelCase__ : Dict = qkv_bias
lowerCAmelCase__ : str = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = drop_path_rate
lowerCAmelCase__ : str = hidden_act
lowerCAmelCase__ : Optional[int] = use_absolute_embeddings
lowerCAmelCase__ : int = patch_norm
lowerCAmelCase__ : List[Any] = layer_norm_eps
lowerCAmelCase__ : List[str] = initializer_range
lowerCAmelCase__ : Dict = is_training
lowerCAmelCase__ : str = scope
lowerCAmelCase__ : Optional[Any] = use_labels
lowerCAmelCase__ : Any = type_sequence_label_size
lowerCAmelCase__ : int = encoder_stride
def UpperCAmelCase_ ( self ) -> List[Any]:
lowerCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : str = None
if self.use_labels:
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCAmelCase__ : Any = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> str:
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : Union[str, Any] = SwinvaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : int = model(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase__ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
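        # Explanatory note: each of the len(depths) - 1 patch-merging stages halves both
        # spatial dimensions (dividing the sequence length by 4) and doubles the channel
        # dimension, which yields the expected_seq_len and expected_dim checked above.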
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : List[Any] = SwinvaForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ : List[str] = 1
lowerCAmelCase__ : str = SwinvaForMaskedImageModeling(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]:
lowerCAmelCase__ : Optional[Any] = self.type_sequence_label_size
lowerCAmelCase__ : Tuple = SwinvaForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : int = model(__UpperCAmelCase ,labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
__lowercase : Any = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__lowercase : Tuple = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
__lowercase : Optional[int] = False
__lowercase : Dict = False
__lowercase : Any = False
__lowercase : Any = False
def UpperCAmelCase_ ( self ) -> List[Any]:
lowerCAmelCase__ : Union[str, Any] = SwinvaModelTester(self )
lowerCAmelCase__ : Optional[Any] = ConfigTester(self ,config_class=__UpperCAmelCase ,embed_dim=37 )
def UpperCAmelCase_ ( self ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> int:
pass
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Union[str, Any] = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowerCAmelCase__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase ,nn.Linear ) )
def UpperCAmelCase_ ( self ) -> List[Any]:
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Union[str, Any] = model_class(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Union[str, Any] = [*signature.parameters.keys()]
lowerCAmelCase__ : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : int = True
for model_class in self.all_model_classes:
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : List[Any] = False
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : List[Any] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Tuple = model(**self._prepare_for_class(__UpperCAmelCase ,__UpperCAmelCase ) )
lowerCAmelCase__ : List[Any] = outputs.attentions
lowerCAmelCase__ : Tuple = len(self.model_tester.depths )
self.assertEqual(len(__UpperCAmelCase ) ,__UpperCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ : str = True
lowerCAmelCase__ : int = config.window_size**2
lowerCAmelCase__ : Union[str, Any] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Union[str, Any] = model(**self._prepare_for_class(__UpperCAmelCase ,__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) ,__UpperCAmelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
lowerCAmelCase__ : Optional[int] = len(__UpperCAmelCase )
# Check attention is always last and order is fine
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : int = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : int = model(**self._prepare_for_class(__UpperCAmelCase ,__UpperCAmelCase ) )
if hasattr(self.model_tester ,"""num_hidden_states_types""" ):
lowerCAmelCase__ : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCAmelCase__ : Any = 2
self.assertEqual(out_len + added_hidden_states ,len(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) ,__UpperCAmelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any:
lowerCAmelCase__ : Any = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCAmelCase ,__UpperCAmelCase ) )
lowerCAmelCase__ : List[Any] = outputs.hidden_states
lowerCAmelCase__ : Union[str, Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__UpperCAmelCase ) ,__UpperCAmelCase )
# Swinv2 has a different seq_length
lowerCAmelCase__ : List[Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
lowerCAmelCase__ : List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(__UpperCAmelCase ) ,__UpperCAmelCase )
            batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
            reshaped_hidden_states = (
                reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
            )
            self.assertListEqual(
                list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim]
            )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase__ : Any = True
self.check_hidden_states_output(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : int = True
self.check_hidden_states_output(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[str] = 3
lowerCAmelCase__ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase__ : Tuple = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ : List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[Any] = True
self.check_hidden_states_output(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : Optional[Any] = True
self.check_hidden_states_output(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,(padded_height, padded_width) )
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@slow
def UpperCAmelCase_ ( self ) -> Optional[int]:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : str = SwinvaModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[int] = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
lowerCAmelCase__ : Dict = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase_ ( self ) -> Any:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : Union[str, Any] = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = self.default_image_processor
lowerCAmelCase__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCAmelCase__ : Any = image_processor(images=__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : int = model(**__UpperCAmelCase )
# verify the logits
lowerCAmelCase__ : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,__UpperCAmelCase )
lowerCAmelCase__ : List[str] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__UpperCAmelCase ,atol=1E-4 ) )
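
# Illustrative sketch (not part of the test suite) of the sequence-length bookkeeping the
# hidden-state checks above rely on. The 256/4 values mirror the
# "microsoft/swinv2-tiny-patch4-window8-256" checkpoint and are assumptions here:
#
#   image_size, patch_size = (256, 256), (4, 4)
#   num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
#   assert num_patches == 64 * 64  # 4096 patch tokens at the first stage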
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Any = ["torch"]
def __init__( self : Tuple , *A : Any , **A : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : List[Any] , **A : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : Optional[Any] , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = ["torch"]
def __init__( self : List[str] , *A : List[str] , **A : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : List[str] , **A : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : Optional[int] , **A : Dict ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Any = ["torch"]
def __init__( self : List[str] , *A : int , **A : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *A : Optional[Any] , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : List[str] , **A : str ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = ["torch"]
def __init__( self : Optional[Any] , *A : Optional[Any] , **A : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Optional[int] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : Optional[int] , **A : Dict ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["torch"]
def __init__( self : Optional[Any] , *A : int , **A : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : Optional[Any] , **A : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *A : Optional[Any] , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["torch"]
def __init__( self : int , *A : Optional[Any] , **A : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *A : Tuple , **A : List[str] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : Tuple , **A : int ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["torch"]
def __init__( self : Tuple , *A : str , **A : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : List[str] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : Any , **A : List[str] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["torch"]
def __init__( self : Optional[int] , *A : Any , **A : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *A : List[str] , **A : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : List[str] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["torch"]
def __init__( self : Dict , *A : Any , **A : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *A : List[str] , **A : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : Optional[int] , **A : Any ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = ["torch"]
def __init__( self : int , *A : int , **A : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : Optional[int] , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : Optional[Any] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["torch"]
def __init__( self : Any , *A : Union[str, Any] , **A : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *A : List[Any] , **A : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : int , **A : Any ):
requires_backends(cls , ["torch"] )
def UpperCamelCase_ ( *_UpperCAmelCase : Tuple , **_UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
requires_backends(_UpperCAmelCase , ["torch"] )
def UpperCamelCase_ ( *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
requires_backends(_UpperCAmelCase , ["torch"] )
def UpperCamelCase_ ( *_UpperCAmelCase : Tuple , **_UpperCAmelCase : List[str] ) -> List[Any]:
"""simple docstring"""
requires_backends(_UpperCAmelCase , ["torch"] )
def UpperCamelCase_ ( *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
requires_backends(_UpperCAmelCase , ["torch"] )
def UpperCamelCase_ ( *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Tuple ) -> int:
"""simple docstring"""
requires_backends(_UpperCAmelCase , ["torch"] )
def UpperCamelCase_ ( *_UpperCAmelCase : int , **_UpperCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
requires_backends(_UpperCAmelCase , ["torch"] )
def UpperCamelCase_ ( *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(_UpperCAmelCase , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = ["torch"]
def __init__( self : Optional[int] , *A : Tuple , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : List[str] , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : Optional[int] , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["torch"]
def __init__( self : Dict , *A : List[str] , **A : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : Optional[Any] , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Any , **A : Dict ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["torch"]
def __init__( self : List[Any] , *A : int , **A : List[str] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *A : List[str] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : Optional[int] , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = ["torch"]
def __init__( self : Optional[int] , *A : List[Any] , **A : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : str , **A : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : Optional[int] , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Any = ["torch"]
def __init__( self : List[str] , *A : List[Any] , **A : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : str , **A : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Tuple , **A : List[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = ["torch"]
def __init__( self : Dict , *A : Optional[int] , **A : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : Any , **A : List[str] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Optional[int] , **A : List[str] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: int = ["torch"]
def __init__( self : Union[str, Any] , *A : int , **A : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : List[str] , **A : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *A : List[Any] , **A : List[str] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = ["torch"]
def __init__( self : Union[str, Any] , *A : List[str] , **A : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : Optional[Any] , **A : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : List[Any] , **A : Dict ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["torch"]
def __init__( self : List[str] , *A : Union[str, Any] , **A : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Tuple , **A : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : Tuple , **A : List[str] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["torch"]
def __init__( self : Any , *A : Union[str, Any] , **A : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *A : Dict , **A : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : Any , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["torch"]
def __init__( self : Optional[Any] , *A : Union[str, Any] , **A : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : int , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : str , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["torch"]
def __init__( self : Optional[Any] , *A : Tuple , **A : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : Optional[Any] , **A : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *A : List[Any] , **A : str ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["torch"]
def __init__( self : str , *A : Dict , **A : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : List[str] , **A : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : Any , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = ["torch"]
def __init__( self : Any , *A : List[str] , **A : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : Dict , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *A : Optional[Any] , **A : Any ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["torch"]
def __init__( self : Any , *A : List[str] , **A : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : Dict , **A : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *A : Any , **A : Dict ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["torch"]
def __init__( self : Union[str, Any] , *A : List[Any] , **A : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : str , **A : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : Dict , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["torch"]
def __init__( self : Optional[int] , *A : Dict , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Dict , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : List[str] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Any = ["torch"]
def __init__( self : int , *A : int , **A : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : Tuple , **A : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : List[Any] , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = ["torch"]
def __init__( self : Dict , *A : List[Any] , **A : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *A : Tuple , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : str , **A : List[str] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: int = ["torch"]
def __init__( self : Tuple , *A : List[Any] , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : str , **A : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : List[Any] , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = ["torch"]
def __init__( self : Union[str, Any] , *A : Any , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Optional[Any] , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : int , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["torch"]
def __init__( self : Tuple , *A : List[Any] , **A : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Any , *A : Any , **A : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : int , **A : str ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = ["torch"]
def __init__( self : int , *A : List[str] , **A : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : Optional[int] , **A : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : Optional[Any] , **A : Dict ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["torch"]
def __init__( self : Any , *A : Optional[Any] , **A : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : int , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *A : Union[str, Any] , **A : int ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["torch"]
def __init__( self : Dict , *A : Any , **A : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *A : Optional[int] , **A : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : Optional[int] , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["torch"]
def __init__( self : Union[str, Any] , *A : Dict , **A : List[str] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : List[Any] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : int , **A : str ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = ["torch"]
def __init__( self : Optional[Any] , *A : Optional[int] , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *A : Tuple , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : Tuple , **A : List[str] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["torch"]
def __init__( self : Tuple , *A : Any , **A : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : Dict , **A : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *A : Tuple , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["torch"]
def __init__( self : List[str] , *A : int , **A : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *A : Dict , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : List[str] , **A : Tuple ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["torch"]
def __init__( self : Optional[Any] , *A : Dict , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *A : Optional[Any] , **A : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : Any , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Tuple = ["torch"]
def __init__( self : Any , *A : Any , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *A : Optional[int] , **A : List[str] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : Tuple , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = ["torch"]
def __init__( self : int , *A : str , **A : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *A : Union[str, Any] , **A : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : Optional[Any] , **A : List[str] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: int = ["torch"]
def __init__( self : List[str] , *A : Any , **A : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *A : List[Any] , **A : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *A : Union[str, Any] , **A : Tuple ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = ["torch"]
def __init__( self : str , *A : Any , **A : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : Tuple , **A : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *A : Optional[int] , **A : List[Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["torch"]
def __init__( self : Any , *A : Optional[int] , **A : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : int , **A : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : str , **A : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["torch"]
def __init__( self : Optional[Any] , *A : Optional[Any] , **A : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : List[str] , **A : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *A : List[str] , **A : Tuple ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Any = ["torch"]
def __init__( self : List[str] , *A : Optional[Any] , **A : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *A : str , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : List[Any] , **A : Tuple ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Tuple = ["torch"]
def __init__( self : Optional[int] , *A : Union[str, Any] , **A : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *A : Any , **A : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : List[Any] , **A : Optional[int] ):
requires_backends(cls , ["torch"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["torch"]
def __init__( self : Union[str, Any] , *A : List[str] , **A : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *A : List[Any] , **A : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *A : Optional[int] , **A : Optional[Any] ):
requires_backends(cls , ["torch"] )
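

# Illustrative, self-contained sketch of the dummy-object pattern used throughout this
# module. The names below are simplified stand-ins, not the real `DummyObject` /
# `requires_backends` utilities: instantiating a dummy class, or touching any of its
# attributes, raises an ImportError naming the missing backend, so the public API stays
# importable even when torch is not installed.
def _sketch_requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class _SketchDummyObject(type):
    # fires only when normal attribute lookup fails, so `_backends` itself is reachable
    def __getattr__(cls, key):
        _sketch_requires_backends(cls, cls._backends)


class _SketchPipeline(metaclass=_SketchDummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        _sketch_requires_backends(self, self._backends)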
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder: bool = True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
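

# Quick usage sketch (illustrative, not part of the original module; the horizon values
# below are assumptions chosen only to show the shape bookkeeping):
#
#   config = AutoformerConfig(prediction_length=24, context_length=48)
#   config.hidden_size    # resolves to d_model via attribute_map -> 64
#   config.feature_size   # input_size * len(lags_sequence) + _number_of_features
#   model = AutoformerModel(config)  # assuming AutoformerModel is importable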
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
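

# Minimal standalone sketch (not part of the original test file) of driving the benchmark
# directly; the model id and sizes are illustrative and torch is assumed to be available.
if __name__ == "__main__":
    benchmark_args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"], training=False, inference=True, sequence_lengths=[8, 32], batch_sizes=[1], multi_process=False
    )
    print(PyTorchBenchmark(benchmark_args).run())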
from ..utils import DummyObject, requires_backends
class lowercase ( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
snake_case_ = ["""flax""", """transformers"""]
def __init__( self : List[str] , *a_ : Optional[Any] , **a_ : Any ):
"""simple docstring"""
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def _UpperCamelCase ( cls : Union[str, Any] , *a_ : List[str] , **a_ : List[Any] ):
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def _UpperCamelCase ( cls : str , *a_ : List[Any] , **a_ : Any ):
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
class lowercase ( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
snake_case_ = ["""flax""", """transformers"""]
def __init__( self : int , *a_ : Any , **a_ : List[Any] ):
"""simple docstring"""
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def _UpperCamelCase ( cls : Union[str, Any] , *a_ : List[str] , **a_ : Tuple ):
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *a_ : Tuple , **a_ : Tuple ):
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
class lowercase ( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
snake_case_ = ["""flax""", """transformers"""]
def __init__( self : int , *a_ : Union[str, Any] , **a_ : Any ):
"""simple docstring"""
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , *a_ : Any , **a_ : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def _UpperCamelCase ( cls : Tuple , *a_ : List[Any] , **a_ : int ):
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
class lowercase ( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
snake_case_ = ["""flax""", """transformers"""]
def __init__( self : List[str] , *a_ : Any , **a_ : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def _UpperCamelCase ( cls : Dict , *a_ : List[Any] , **a_ : Any ):
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def _UpperCamelCase ( cls : Optional[int] , *a_ : Union[str, Any] , **a_ : Tuple ):
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
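

# Quick usage sketch (illustrative, not from the original module):
#
#   tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#   enc = tokenizer("Hello world")
#   enc["input_ids"]  # starts with <s> and ends with </s>, matching
#                     # build_inputs_with_special_tokens above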
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
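
# Typical CLI usage (illustrative): after configuring with `accelerate config`, run either
#
#   accelerate test
#   accelerate test --config_file path/to/config.yaml
#
# both of which launch test_utils/scripts/test_script.py through `accelerate-launch`
# via test_command above.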
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files

FILE_CONTENT = """\
    Text data.
    Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")
    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    return data_dir
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels)
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    # Field types reconstructed from the imports above (PIL, numpy, BaseOutput):
    # `images` holds the decoded images and `nsfw_content_detected` the
    # per-image safety-checker flags.
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )
    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}
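# The try/except blocks below follow the library's lazy-import pattern: optional
# submodules are registered in `_import_structure` only when their dependencies are
# installed, and OptionalDependencyNotAvailable is swallowed so the package still
# imports without them.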
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]
if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
from typing import Any
class Graph:
    """Graph stored as an edge list, with a minimum spanning tree computed by
    Boruvka's algorithm."""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        # Each edge is stored as [u, v, weight].
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        # Follow parent pointers until the component representative is found.
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        # Refresh every node's representative after a merge.
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        # Merge the smaller component into the larger one.
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            # For every component, remember the cheapest edge leaving it.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            # Add each component's cheapest edge to the MST and merge components.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
    pass
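# Example usage (an illustrative sketch; the printed edge order depends on the input):
#
#     g = Graph(4)
#     for u, v, w in ((0, 1, 1), (1, 2, 2), (2, 3, 1), (0, 3, 3)):
#         g.add_edge(u, v, w)
#     g.boruvka()  # adds edges [0-1], [2-3], [1-2]; total MST weight is 4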
if __name__ == "__main__":
import doctest
doctest.testmod()
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
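# The parametrized test below is a differential test: each operation sequence is
# replayed against both HashMap and a plain dict, and every observable property
# (return values, repr, key set, length, items) must agree after each step.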
@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
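# Illustrative use (a sketch; the tokenizer checkpoint and data_dir are assumptions):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="path/to/squad")
#     train_dataset = SquadDataset(args, tokenizer, mode="train")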
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    # Run the candidate program in a subprocess so a hang or crash cannot take
    # down the evaluator; the shared list carries back the result string.
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()
    if not result:
        result.append("timed out")
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
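# Illustrative call (a sketch; `problem` and `completion` are assumed to come from
# the HumanEval dataset and the model under evaluation, as in the upstream repo):
#
#     check_program = problem["prompt"] + completion + "\n" + problem["test"] + f"\ncheck({problem['entry_point']})"
#     print(check_correctness(check_program, timeout=3.0, task_id=problem["task_id"], completion_id=0))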
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
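# reliability_guard (below) disables os/shutil/subprocess entry points that untrusted
# generated code could use to damage the host. As the upstream warning notes, this is
# NOT a security sandbox; it only reduces the blast radius of destructive calls.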
def reliability_guard(maximum_memory_bytes=None):
    # The set of disabled entry points below is restored to match the upstream
    # OpenAI human-eval implementation this file is adapted from.
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latent_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True)
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([0.18681869, 0.33907816, 0.5361276, 0.14432865, -0.02856611, -0.73941123, 0.23397987, 0.47322682, -0.37823164])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([0.18539645, 0.33987248, 0.5378559, 0.14437142, -0.02455261, -0.7338317, 0.23990755, 0.47356272, -0.3786505])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device

enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"))
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3)
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
"""Convert a saved PyTorch state dict to half precision."""
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
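
# Hedged usage sketch (not part of the original script; paths are placeholders):
# round-trip a tiny state dict through the converter and confirm the tensors
# come back as fp16. Defined but never called, so module behavior is unchanged.
def _example_convert_roundtrip() -> None:
    import os
    import tempfile

    tmp_dir = tempfile.mkdtemp()
    src = os.path.join(tmp_dir, "pytorch_model.bin")
    torch.save({"weight": torch.randn(4, 4)}, src)  # tiny stand-in state dict
    convert(src)  # no save_path -> overwrites src in place
    assert torch.load(src)["weight"].dtype == torch.float16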
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a trainable low-rank adapter."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
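
    # Minimal usage sketch for LoRALayer above (illustrative, not part of the
    # test suite): wrap a plain linear layer; the forward pass returns the base
    # output plus the low-rank adapter's contribution.
    #
    #   base = nn.Linear(16, 16)
    #   lora = LoRALayer(base, rank=4)
    #   out = lora(torch.randn(2, 16))  # shape (2, 16), same as base alone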
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
'''simple docstring'''
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
# Models and tokenizer
A__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name )
class Bnb4BitTest(Base4bitTest):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : str =self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) )
A__ : Union[str, Any] =config.to_dict()
A__ : Any =config.to_diff_dict()
A__ : Optional[Any] =config.to_json_string()
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
        from bitsandbytes.nn import Params4bit
A__ : int =self.model_fpaa.get_memory_footprint()
A__ : Optional[Any] =self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A__ : Tuple =get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Params4bit)
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
A__ : Tuple =True
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" )
A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
A__ : Dict =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =self.model_fpaa.to(torch.floataa )
A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.half()
# Check this does not throw an error
A__ : int =self.model_fpaa.float()
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
'''simple docstring'''
@classmethod
def lowercase__ ( cls : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Tuple ="""t5-small"""
A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name )
A__ : Optional[int] ="""Translate in German: Hello, my dog is cute"""
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules
A__ : Optional[Any] =None
# test with `t5-small`
A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Optional[Any] =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : List[str] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ )
A__ : Dict =modules
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Any =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Dict =model.generate(**lowerCAmelCase_ )
class Bnb4BitModelClassesTest(Base4bitTest):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
super().setUp()
# model_name
A__ : Any ="""bigscience/bloom-560m"""
A__ : List[Any] ="""t5-small"""
# Different types of model
A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Bnb4BitTestPipeline(Base4bitTest):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
A__ : Dict =pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A__ : Optional[int] =self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
'''simple docstring'''
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : int =AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class Bnb4BitTestTraining(Base4bitTest):
'''simple docstring'''
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] ="""facebook/opt-350m"""
super().setUp()
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A__ : int =False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A__ : Dict =param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase_ ) ):
A__ : int =LoRALayer(module.q_proj , rank=16 )
A__ : Any =LoRALayer(module.k_proj , rank=16 )
A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A__ : Any =model.forward(**lowerCAmelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class Bnb4BitGPT2Test(Bnb4BitTest):
'''simple docstring'''
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
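
    # Context note (illustrative): this subclass reruns the base test suite with
    # gpt2-xl, where the fp16 footprint is expected to be ~3.32x the quantized
    # one, checked via assertAlmostEqual(mem_fp16 / mem_quantized, EXPECTED_RELATIVE_DIFFERENCE).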
"""Minimax on a perfect binary game tree: maximizer and minimizer alternate levels."""
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
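
# Worked example (added for illustration; the leaf values are hypothetical):
# with leaves [3, 5, 2, 9] and height log2(4) = 2, the root maximizer picks
# max(min(3, 5), min(2, 9)) = max(3, 2) = 3. Defined but not called.
def _example_minimax() -> None:
    scores = [3, 5, 2, 9]
    height = math.log(len(scores), 2)
    assert minimax(0, 0, True, scores, height) == 3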
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cumulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
_lowercase = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
_lowercase = tf.convert_to_tensor(
[8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above
_lowercase = tf_top_k_top_p_filtering(UpperCAmelCase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
_lowercase = output[output != -float("""inf""" )]
_lowercase = tf.cast(
tf.where(tf.not_equal(UpperCAmelCase , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , rtol=1e-12 )
tf.debugging.assert_equal(UpperCAmelCase , UpperCAmelCase )
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
"""simple docstring"""
if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
_lowercase = 2
_lowercase = 2
        class DummyModel(tf.Module):
"""simple docstring"""
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=UpperCAmelCase , )
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
_lowercase = self.model.generate(
input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , max_new_tokens=UpperCAmelCase , return_dict_in_generate=UpperCAmelCase , )
return {"sequences": outputs["sequences"]}
_lowercase = [[2, 0], [102, 103]]
_lowercase = [[1, 0], [1, 1]]
_lowercase = DummyModel(model=UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase , UpperCAmelCase , signatures={"""serving_default""": dummy_model.serving} )
_lowercase = tf.saved_model.load(UpperCAmelCase ).signatures["""serving_default"""]
for batch_size in range(1 , len(UpperCAmelCase ) + 1 ):
_lowercase = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
_lowercase = serving_func(**UpperCAmelCase )["""sequences"""]
_lowercase = test_model.generate(**UpperCAmelCase , max_new_tokens=UpperCAmelCase )
tf.debugging.assert_equal(UpperCAmelCase , UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
_lowercase = 1
_lowercase = 2
        class DummyModel(tf.Module):
"""simple docstring"""
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=UpperCAmelCase , )
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
_lowercase = self.model.generate(
input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , max_new_tokens=UpperCAmelCase , return_dict_in_generate=UpperCAmelCase , )
return {"sequences": outputs["sequences"]}
_lowercase = [[2], [102, 103]]
_lowercase = [[1], [1, 1]]
_lowercase = DummyModel(model=UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase , UpperCAmelCase , signatures={"""serving_default""": dummy_model.serving} )
_lowercase = tf.saved_model.load(UpperCAmelCase ).signatures["""serving_default"""]
for input_row in range(len(UpperCAmelCase ) ):
_lowercase = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
_lowercase = serving_func(**UpperCAmelCase )["""sequences"""]
_lowercase = test_model.generate(**UpperCAmelCase , max_new_tokens=UpperCAmelCase )
tf.debugging.assert_equal(UpperCAmelCase , UpperCAmelCase )
@slow
@require_tensorflow_text
def _UpperCAmelCase ( self ):
'''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read())
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
def _UpperCAmelCase ( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ):
'''simple docstring'''
_lowercase = self.tokenizer.tokenize(UpperCAmelCase )
_lowercase , _lowercase = text.pad_model_inputs(
UpperCAmelCase , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
_lowercase = self.model.generate(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase )
return self.tokenizer.detokenize(UpperCAmelCase )
_lowercase = CompleteSentenceTransformer()
_lowercase = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
_lowercase = complete_model(UpperCAmelCase )
_lowercase = tf.keras.Model(UpperCAmelCase , UpperCAmelCase )
keras_model.save(UpperCAmelCase )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
_lowercase = 14
_lowercase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
_lowercase = """Hello, my dog is cute and"""
_lowercase = tokenizer(UpperCAmelCase , return_tensors="""tf""" )
_lowercase = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
_lowercase = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
_lowercase = model.generate(**UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
_lowercase = [638, 198]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
_lowercase = model.generate(**UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
_lowercase = """Hugging Face is a technology company based in New York and Paris."""
_lowercase = bart_tokenizer(UpperCAmelCase , return_tensors="""tf""" ).input_ids
_lowercase = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
_lowercase = bart_model.generate(UpperCAmelCase ).numpy()
        class FakeBart(TFBartForConditionalGeneration):
"""simple docstring"""
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ):
'''simple docstring'''
return super().call(UpperCAmelCase , **UpperCAmelCase )
_lowercase = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
_lowercase = bart_model.generate(UpperCAmelCase , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(UpperCAmelCase , UpperCAmelCase ) )
        class FakeEncoder(bart_model.model.encoder.__class__):
"""simple docstring"""
def _UpperCAmelCase ( self , UpperCAmelCase , **UpperCAmelCase ):
'''simple docstring'''
return super().call(UpperCAmelCase , **UpperCAmelCase )
_lowercase = FakeEncoder(bart_model.config , bart_model.model.shared )
_lowercase = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
_lowercase = bart_model.generate(UpperCAmelCase ).numpy()
with self.assertRaises(UpperCAmelCase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(UpperCAmelCase , foo="""bar""" )
from __future__ import annotations

sieve = [True] * 1000001
i = 2
while i * i <= 1000000:
    if sieve[i]:
        for j in range(i * i, 1000001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def count_circular_primes() -> int:
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
from collections.abc import Iterable
from typing import Any


class Node:
    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None) -> None:
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)
    # Prints all the elements of the list in order traversal
    print(t)
    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")
    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")
    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore
    for i in testlist:
        t.remove(i)
    print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
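
# Small illustrative check (not part of the original demo, defined but not
# called): inorder traversal of a BST yields sorted order, so
# find_kth_smallest(k, root) returns the k-th smallest value.
def _example_kth_smallest() -> None:
    t = BinarySearchTree()
    t.insert(8, 3, 6, 1, 10)
    assert t.find_kth_smallest(2, t.root) == 3  # sorted order: [1, 3, 6, 8, 10]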
from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
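
# Numeric sanity sketch (illustrative, defined but not called): for A = I, the
# Sherman-Morrison update of A^(-1) should invert A + u v^T, so their product
# comes back to the identity.
def _example_sherman_morrison() -> None:
    ainv = Matrix(2, 2, 0)
    for i in range(2):
        ainv[i, i] = 1  # A^(-1) = I, hence A = I
    u = Matrix(2, 1, 0)
    u[0, 0], u[1, 0] = 1, 2
    v = Matrix(2, 1, 0)
    v[0, 0], v[1, 0] = 3, 4
    updated_inv = ainv.sherman_morrison(u, v)
    a_plus = ainv + (u * v.transpose())  # A + u v^T, using A = I = A^(-1)
    product = a_plus * updated_inv
    assert abs(product[0, 0] - 1) < 1e-9 and abs(product[0, 1]) < 1e-9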
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return
    # If the first element is larger than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (int)((h - i + 1) / 3)
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, (h))
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
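
# Illustrative check (not in the original, defined but not called): stooge sort
# is correct but runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71), so keep inputs small.
def _example_stooge_sort() -> None:
    assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]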
"""Image processor class for Swin2SR."""
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255,
                 do_pad: bool = True, pad_size: int = 8, **kwargs):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None,
                   rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None,
                   pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
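
# Usage sketch (illustrative, relying only on the defaults defined above): an
# input of height 13 and width 17 is rescaled by 1/255 and symmetric-padded up
# to 16 x 24, the next multiples of pad_size=8 strictly above each side.
#
#   processor = Swin2SRImageProcessor()
#   batch = processor(images=image, return_tensors="pt")  # image: HxWxC uint8 array
#   batch["pixel_values"].shape                           # e.g. (1, 3, 16, 24) for a 13x17x3 input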
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a_ = "src/diffusers"
a_ = "."
# This is to make sure the diffusers module imported is the one in the repo.
a_ = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
a_ = spec.loader.load_module()
def _should_continue(line, indent):
    # Keep reading while the line stays at (or inside) the given indent level.
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers repo."""
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    """Return the indentation of the first non-empty line in `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply the black code formatter to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the file content when `overwrite=True`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(REPO_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
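
# Usage sketch (mirroring the header comment of this script): run from the
# repository root.
#
#   python utils/check_copies.py                      # report copy inconsistencies
#   python utils/check_copies.py --fix_and_overwrite  # rewrite files to fix them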
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Union[str, Any] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
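# --- illustration (not part of the original module): the deferred-import idea behind
# `_LazyModule`, written as a self-contained sketch with importlib. `_LazyModule`
# generalizes this per-attribute lazy loading via `_import_structure`; the one-entry
# mapping below is a hypothetical stand-in, not the real transformers machinery. ---
if __name__ == "__main__":
    import importlib

    _lazy_map = {"sqrt": "math"}  # attribute name -> module that actually defines it

    def lazy_get(name):
        # import the backing module only when the attribute is first requested
        module = importlib.import_module(_lazy_map[name])
        return getattr(module, name)

    assert lazy_get("sqrt")(9.0) == 3.0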
'''simple docstring'''
class SubArray:
    """Maximum contiguous subarray sum (Kadane's algorithm) over a
    comma-separated string of integers."""

    def __init__(self, arr):
        # we need a list, not a string, so split on commas
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            # either extend the best subarray ending at i - 1 or start fresh at i
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            # best answer seen over any prefix so far
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print("the result is:", re)
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"

        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
def _A ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _A ( self : List[Any] ):
'''simple docstring'''
try:
lowerCAmelCase__ : int = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _A ( self : Optional[Any] ):
'''simple docstring'''
try:
lowerCAmelCase__ : Union[str, Any] = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _A ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = MecabTokenizer(do_lower_case=a__ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _A ( self : Union[str, Any] ):
'''simple docstring'''
try:
lowerCAmelCase__ : Optional[int] = MecabTokenizer(
do_lower_case=a__ , normalize_text=a__ , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def _A ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = MecabTokenizer(normalize_text=a__ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def _A ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(a__ )
lowerCAmelCase__ : Dict = "こんにちは、世界。\nこんばんは、世界。"
lowerCAmelCase__ : Tuple = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(a__ , "wb" ) as handle:
pickle.dump(a__ , a__ )
with open(a__ , "rb" ) as handle:
lowerCAmelCase__ : Any = pickle.load(a__ )
lowerCAmelCase__ : Optional[Any] = tokenizer_new.tokenize(a__ )
self.assertListEqual(a__ , a__ )
@require_sudachi
def _A ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _A ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def _A ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def _A ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def _A ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = SudachiTokenizer(do_lower_case=a__ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _A ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = SudachiTokenizer(normalize_text=a__ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def _A ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = SudachiTokenizer(trim_whitespace=a__ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def _A ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(a__ )
lowerCAmelCase__ : Optional[Any] = "こんにちは、世界。\nこんばんは、世界。"
lowerCAmelCase__ : Tuple = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(a__ , "wb" ) as handle:
pickle.dump(a__ , a__ )
with open(a__ , "rb" ) as handle:
lowerCAmelCase__ : Tuple = pickle.load(a__ )
lowerCAmelCase__ : str = tokenizer_new.tokenize(a__ )
self.assertListEqual(a__ , a__ )
@require_jumanpp
def _A ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _A ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = JumanppTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _A ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = JumanppTokenizer(normalize_text=a__ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _A ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Any = JumanppTokenizer(trim_whitespace=a__ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def _A ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Any = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    """Gives the new context after removing HTML tokens, and the answer span shifted accordingly."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            # every dropped HTML token before the span shifts the span left by one
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(inputs),
                "end_token": [-100] * len(inputs),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        # use window-local copies so the global span survives across iterations
        if start_token >= i and end_token <= end_index - 1:
            start_token_adjusted = start_token - i + q_len
            end_token_adjusted = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token_adjusted = -100
            end_token_adjusted = -100
            answers_category.append("null")
        new = inputs[-1][start_token_adjusted : end_token_adjusted + 1]

        answers_start_token.append(start_token_adjusted)
        answers_end_token.append(end_token_adjusted)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # removing 60% of the no-answer samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
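# --- illustration (an addition, not part of the original script) ---
# How the strided windows above are laid out: each window keeps the q_len question
# tokens and takes (max_length - q_len) document tokens, and consecutive windows
# start (max_length - doc_stride) tokens apart. Toy numbers for clarity (the real
# loop also breaks early once a window ends at the [SEP] token):
if __name__ == "__main__":
    q_len, max_length, doc_stride = 4, 16, 10
    total_tokens = 40  # question + context token count for one example
    starts = list(range(q_len, total_tokens, max_length - doc_stride))
    windows = [(i, min(i + max_length - q_len, total_tokens)) for i in starts]
    print(windows)  # [(4, 16), (10, 22), (16, 28), (22, 34), (28, 40), (34, 40)]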
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d usable as an exponent; n - 1 = d * (2 ** exp)
        exp += 1

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
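# --- illustration (an addition, not part of the original file; it assumes
# bin_exp_mod is importable exactly as at the top of this module) ---
# The decomposition the loop above computes: n - 1 = d * 2**exp with d odd.
# For n = 561 (a Carmichael number), 560 = 35 * 2**4, and Miller-Rabin still
# rejects it, unlike the plain Fermat test.
if __name__ == "__main__":
    m = 561 - 1
    d, exp = m, 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    assert (d, exp) == (35, 4) and d * 2**exp == m
    assert not is_prime_big(561)  # composite: 3 * 11 * 17
    assert is_prime_big(104729)  # the 10000th prime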
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image: each channel value c is mapped to
    128 + factor * (c - 128), where factor grows with level."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
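# --- illustration (an addition, not part of the original file) ---
# Worked numbers for the contrast curve: at level = 170 the factor is
# (259 * 425) / (255 * 89), roughly 4.85, so mid-grey stays at 128 while values
# away from 128 are pushed hard toward the extremes (then clipped by the image mode).
if __name__ == "__main__":
    level = 170
    factor = (259 * (level + 255)) / (255 * (259 - level))
    assert round(factor, 2) == 4.85
    assert int(128 + factor * (128 - 128)) == 128  # mid-grey is a fixed point
    assert int(128 + factor * (160 - 128)) == 283  # out of range; PIL clips to 255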
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
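# --- illustration (an addition, not part of the original test file; it assumes
# jax is installed, as the imports at the top of this file already require) ---
# The JIT-parity pattern used in test_jit_compilation, reduced to a toy function:
# identical output shapes must come back with and without compilation.
if __name__ == "__main__":
    import jax
    import jax.numpy as jnp

    @jax.jit
    def _toy(x):
        return (x * 2.0).sum(axis=-1)

    x = jnp.ones((2, 3))
    jitted = _toy(x)
    with jax.disable_jit():
        eager = _toy(x)
    assert jitted.shape == eager.shape == (2,)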
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
a :Optional[int] = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
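# --- illustration (an addition, not part of the builder above) ---
# The same pyarrow batch-iteration core used by _generate_tables, standalone:
# write a small parquet file, then stream it back in record batches of 4 rows.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile(suffix=".parquet") as tmp:
        pq.write_table(pa.table({"x": list(range(10))}), tmp.name)
        parquet_file = pq.ParquetFile(tmp.name)
        tables = [
            pa.Table.from_batches([batch])
            for batch in parquet_file.iter_batches(batch_size=4, columns=["x"])
        ]
        assert [t.num_rows for t in tables] == [4, 4, 2]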
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
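# --- illustration (an addition, not part of the original script) ---
# `compute_metrics` receives (logits, labels); np.argmax picks the predicted class.
# A tiny self-contained check of that reduction, in plain numpy so it runs without
# the `evaluate` dependency:
if __name__ == "__main__":
    logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
    gold = np.array([1, 0, 0])
    preds = np.argmax(logits, axis=1)  # -> [1, 0, 1]
    accuracy = float((preds == gold).mean())  # 2 of 3 correct
    assert abs(accuracy - 2 / 3) < 1e-9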
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self : int ):
"""simple docstring"""
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
UpperCamelCase = np.ones((1, 1_96) , dtype=SCREAMING_SNAKE_CASE__ )
# forward pass
UpperCamelCase = model(pixel_values=SCREAMING_SNAKE_CASE__ , bool_masked_pos=SCREAMING_SNAKE_CASE__ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = (1, 1_96, 81_92)
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-2 ) )
@slow
def __lowerCAmelCase ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
# forward pass
UpperCamelCase = model(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = (1, 10_00)
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
UpperCamelCase = 2_81
self.assertEqual(logits.argmax(-1 ).item() , SCREAMING_SNAKE_CASE__ )
@slow
def __lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
# forward pass
UpperCamelCase = model(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = (1, 2_18_41)
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
UpperCamelCase = 23_96
self.assertEqual(logits.argmax(-1 ).item() , SCREAMING_SNAKE_CASE__ )
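# Hedged, standalone sketch (illustrative numbers, not from the checkpoints above) of
# the verification idiom the slow tests use: check the shape, compare a tiny slice of
# the logits against hard-coded reference values within a tolerance, and confirm the
# predicted class index, instead of checking the full tensor.
import numpy as np

logits = np.array([[-1.2385, -1.0987, -1.0108, 0.5]])
expected_slice = np.array([-1.2385, -1.0987, -1.0108])
assert logits.shape == (1, 4)
assert np.allclose(logits[0, :3], expected_slice, atol=1e-4)
assert logits.argmax(-1).item() == 3  # predicted class index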
| 170
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_snake_case = logging.get_logger(__name__)
def __lowerCamelCase ( _lowercase , _lowercase ) -> int:
UpperCamelCase = b.T
UpperCamelCase = np.sum(np.square(_lowercase ) , axis=1 )
UpperCamelCase = np.sum(np.square(_lowercase ) , axis=0 )
UpperCamelCase = np.matmul(_lowercase , _lowercase )
UpperCamelCase = aa[:, None] - 2 * ab + ba[None, :]
return d
def __lowerCamelCase ( _lowercase , _lowercase ) -> Dict:
UpperCamelCase = x.reshape(-1 , 3 )
UpperCamelCase = squared_euclidean_distance(_lowercase , _lowercase )
return np.argmin(_lowercase , axis=1 )
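# A minimal, self-contained sketch (names are illustrative, not from the original
# file) of how the two helpers above map RGB pixels to their nearest palette entry.
# The distance expansion is ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, which avoids
# materialising the full (pixels, clusters, 3) difference tensor.
import numpy as np

def nearest_cluster_ids(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    """pixels: (N, 3) float array, clusters: (K, 3) float array -> (N,) int ids."""
    p_sq = np.sum(pixels ** 2, axis=1)     # (N,) squared norms of pixels
    c_sq = np.sum(clusters ** 2, axis=1)   # (K,) squared norms of cluster centres
    cross = pixels @ clusters.T            # (N, K) inner products
    d = p_sq[:, None] - 2 * cross + c_sq[None, :]
    return np.argmin(d, axis=1)

# Example: a 2x2 "image" quantized against a two-colour palette
img = np.array([[[0, 0, 0], [250, 250, 250]],
                [[10, 10, 10], [240, 240, 240]]], dtype=np.float64)
palette = np.array([[0, 0, 0], [255, 255, 255]], dtype=np.float64)
print(nearest_cluster_ids(img.reshape(-1, 3), palette))  # -> [0 1 0 1]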
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] =["pixel_values"]
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Union[List[List[int]], np.ndarray]] = None , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = size if size is not None else {'height': 2_56, 'width': 2_56}
UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = np.array(SCREAMING_SNAKE_CASE__ ) if clusters is not None else None
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = resample
UpperCamelCase = do_normalize
UpperCamelCase = do_color_quantize
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ):
"""simple docstring"""
UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
SCREAMING_SNAKE_CASE__ , size=(size['height'], size['width']) , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , ):
"""simple docstring"""
UpperCamelCase = rescale(image=SCREAMING_SNAKE_CASE__ , scale=1 / 127.5 , data_format=SCREAMING_SNAKE_CASE__ )
UpperCamelCase = image - 1
return image
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : ImageInput , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[List[List[int]], np.ndarray]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : Any , ):
"""simple docstring"""
UpperCamelCase = do_resize if do_resize is not None else self.do_resize
UpperCamelCase = size if size is not None else self.size
UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = resample if resample is not None else self.resample
UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
UpperCamelCase = clusters if clusters is not None else self.clusters
UpperCamelCase = np.array(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if do_resize:
UpperCamelCase = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_normalize:
UpperCamelCase = [self.normalize(image=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_color_quantize:
UpperCamelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
UpperCamelCase = np.array(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = color_quantize(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
UpperCamelCase = images.shape[0]
UpperCamelCase = images.reshape(SCREAMING_SNAKE_CASE__ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
UpperCamelCase = list(SCREAMING_SNAKE_CASE__ )
else:
UpperCamelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
UpperCamelCase = {'input_ids': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
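# Hedged sketch (illustrative, not part of the original class) of the normalize step
# above: rescaling by 1/127.5 and subtracting 1 maps uint8 pixels from [0, 255] to
# [-1, 1], the range ImageGPT-style models expect before colour quantization.
import numpy as np

pixels = np.array([0, 127.5, 255], dtype=np.float64)
normalized = pixels / 127.5 - 1
print(normalized)  # -> [-1.  0.  1.]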
| 170
| 1
|
"""simple docstring"""
def lowercase__ ( lowerCamelCase : list[int] , lowerCamelCase : str ) -> list[int]:
lowerCAmelCase__ : Tuple = int(lowerCamelCase )
# Initialize Result
lowerCAmelCase__ : Optional[int] = []
    # Traverse the denominations from largest to smallest (assumes an ascending list)
    for denomination in reversed(lowerCamelCase ):
        # Take as many coins of this denomination as still fit
        while int(lowerCamelCase ) >= int(lowerCamelCase ):
            total_value -= int(lowerCamelCase )
            answer.append(lowerCamelCase ) # Append the chosen denomination to the answer list
return answer
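# A de-obfuscated sketch of the greedy strategy above (assumed behaviour, since the
# original identifiers are mangled): repeatedly subtract the largest denomination
# that still fits. Greedy is optimal for canonical coin systems such as INR, but
# not for arbitrary denominations.
def greedy_change(denominations: list[int], total: int) -> list[int]:
    coins = []
    for d in sorted(denominations, reverse=True):
        while total >= d:
            total -= d
            coins.append(d)
    return coins

print(greedy_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987))
# -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]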
# Driver Code
if __name__ == "__main__":
__UpperCAmelCase = []
__UpperCAmelCase = "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
__UpperCAmelCase = int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(f"""Denomination {i}: """).strip()))
__UpperCAmelCase = input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
__UpperCAmelCase = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
__UpperCAmelCase = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(f"""Following is minimal change for {value}: """)
__UpperCAmelCase = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
| 308
|
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
__UpperCAmelCase = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
__UpperCAmelCase = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
__UpperCAmelCase = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def lowercase__ ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : bool , lowerCamelCase : Optional[Dict[int, int]] = None , lowerCamelCase : bool = False , ) -> List[Any]:
if label_map is not None:
for old_id, new_id in label_map.items():
lowerCAmelCase__ : List[Any] = new_id
# turn into Numpy arrays
lowerCAmelCase__ : int = np.array(lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = np.array(lowerCamelCase )
if reduce_labels:
lowerCAmelCase__ : Any = 2_5_5
lowerCAmelCase__ : Any = label - 1
lowerCAmelCase__ : str = 2_5_5
lowerCAmelCase__ : List[str] = label != ignore_index
lowerCAmelCase__ : str = np.not_equal(lowerCamelCase , lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = pred_label[mask]
lowerCAmelCase__ : Optional[int] = np.array(lowerCamelCase )[mask]
lowerCAmelCase__ : str = pred_label[pred_label == label]
lowerCAmelCase__ : List[Any] = np.histogram(lowerCamelCase , bins=lowerCamelCase , range=(0, num_labels - 1) )[0]
lowerCAmelCase__ : List[Any] = np.histogram(lowerCamelCase , bins=lowerCamelCase , range=(0, num_labels - 1) )[0]
lowerCAmelCase__ : Dict = np.histogram(lowerCamelCase , bins=lowerCamelCase , range=(0, num_labels - 1) )[0]
lowerCAmelCase__ : str = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
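# Minimal sketch (assumed semantics; the original uses np.histogram with a range
# argument, here explicit bin edges) of the histogram trick above: counting
# per-class pixels of the prediction, the label, and their agreement yields
# intersection and union without any per-class masking loops.
import numpy as np

pred = np.array([0, 0, 1, 1, 2])
label = np.array([0, 1, 1, 1, 2])
num_labels = 3
bins = np.arange(num_labels + 1)

agree = pred[pred == label]                        # pixels where classes match
area_intersect = np.histogram(agree, bins=bins)[0]
area_pred = np.histogram(pred, bins=bins)[0]
area_label = np.histogram(label, bins=bins)[0]
area_union = area_pred + area_label - area_intersect
print(area_intersect / area_union)                 # per-class IoU -> [0.5 0.66666667 1.]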
def lowercase__ ( lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : bool , lowerCamelCase : Optional[Dict[int, int]] = None , lowerCamelCase : bool = False , ) -> int:
lowerCAmelCase__ : List[str] = np.zeros((num_labels,) , dtype=np.floataa )
lowerCAmelCase__ : Tuple = np.zeros((num_labels,) , dtype=np.floataa )
lowerCAmelCase__ : Tuple = np.zeros((num_labels,) , dtype=np.floataa )
lowerCAmelCase__ : Optional[int] = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = intersect_and_union(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def lowercase__ ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : bool , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Dict[int, int]] = None , lowerCamelCase : bool = False , ) -> int:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = total_intersect_and_union(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# compute metrics
lowerCAmelCase__ : str = {}
lowerCAmelCase__ : Optional[int] = total_area_intersect.sum() / total_area_label.sum()
lowerCAmelCase__ : Dict = total_area_intersect / total_area_union
lowerCAmelCase__ : Optional[int] = total_area_intersect / total_area_label
lowerCAmelCase__ : Tuple = np.nanmean(lowerCamelCase )
lowerCAmelCase__ : Dict = np.nanmean(lowerCamelCase )
lowerCAmelCase__ : Any = all_acc
lowerCAmelCase__ : Any = iou
lowerCAmelCase__ : Optional[Any] = acc
if nan_to_num is not None:
lowerCAmelCase__ : Optional[int] = {metric: np.nan_to_num(lowerCamelCase , nan=lowerCamelCase ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ ( datasets.Metric ):
def _lowerCAmelCase ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
"references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
} ) , reference_urls=[
"https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
] , )
def _lowerCAmelCase ( self : List[Any] , _lowercase : Tuple , _lowercase : List[str] , _lowercase : int , _lowercase : bool , _lowercase : Optional[int] = None , _lowercase : Optional[Dict[int, int]] = None , _lowercase : bool = False , ):
lowerCAmelCase__ : List[str] = mean_iou(
results=_lowercase , gt_seg_maps=_lowercase , num_labels=_lowercase , ignore_index=_lowercase , nan_to_num=_lowercase , label_map=_lowercase , reduce_labels=_lowercase , )
return iou_result
| 308
| 1
|
'''simple docstring'''
from __future__ import annotations
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase ):
__UpperCAmelCase , __UpperCAmelCase : Tuple = position
__UpperCAmelCase : Tuple = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
__UpperCAmelCase : Dict = []
for position in positions:
__UpperCAmelCase , __UpperCAmelCase : Any = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(_UpperCAmelCase )
return permissible_positions
def __UpperCamelCase ( _UpperCAmelCase ):
return not any(elem == 0 for row in board for elem in row )
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
if is_complete(_UpperCAmelCase ):
return True
for position in get_valid_pos(_UpperCAmelCase, len(_UpperCAmelCase ) ):
__UpperCAmelCase , __UpperCAmelCase : List[Any] = position
if board[y][x] == 0:
__UpperCAmelCase : str = curr + 1
if open_knight_tour_helper(_UpperCAmelCase, _UpperCAmelCase, curr + 1 ):
return True
__UpperCAmelCase : Any = 0
return False
def __UpperCamelCase ( _UpperCAmelCase ):
__UpperCAmelCase : Dict = [[0 for i in range(_UpperCAmelCase )] for j in range(_UpperCAmelCase )]
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
__UpperCAmelCase : Tuple = 1
if open_knight_tour_helper(_UpperCAmelCase, (i, j), 1 ):
return board
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : List[Any] = F"Open Kight Tour cannot be performed on a board of size {n}"
raise ValueError(_UpperCAmelCase )
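# Hedged usage sketch: the mangled functions above correspond, by their call sites,
# to get_valid_pos, is_complete, open_knight_tour_helper, and an open_knight_tour
# entry point. With those assumed original names the solver is used as:
#
#   board = open_knight_tour(5)   # 5x5 is the smallest square board with an open tour
#   for row in board:
#       print(" ".join(f"{cell:2d}" for cell in row))
#
# Each cell holds the 1-based step at which the knight visits it; n in {2, 3, 4}
# raises ValueError because no open tour exists on those boards.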
if __name__ == "__main__":
import doctest
doctest.testmod()
| 329
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''ClapFeatureExtractor'''
SCREAMING_SNAKE_CASE = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
def __call__( self : str , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[int]=None , **UpperCAmelCase_ : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = kwargs.pop("sampling_rate" , UpperCAmelCase_ )
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none." )
if text is not None:
__UpperCAmelCase : str = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if audios is not None:
__UpperCAmelCase : List[Any] = self.feature_extractor(
UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if text is not None and audios is not None:
__UpperCAmelCase : Any = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ )
def lowerCamelCase_ ( self : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCamelCase_ ( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str] ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = self.tokenizer.model_input_names
__UpperCAmelCase : Tuple = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
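# Illustrative, standalone sketch (the class above is obfuscated) of the routing
# pattern a dual-modality processor implements: one __call__ that sends text to a
# tokenizer and audio to a feature extractor, merging both outputs when both
# modalities are given. All names here are assumptions of the sketch.
def combined_call(tokenizer, feature_extractor, text=None, audios=None, **kwargs):
    if text is None and audios is None:
        raise ValueError("You have to specify either text or audios. Both cannot be none.")
    encoding = tokenizer(text, **kwargs) if text is not None else None
    audio_features = feature_extractor(audios, **kwargs) if audios is not None else None
    if encoding is not None and audio_features is not None:
        encoding["input_features"] = audio_features.input_features  # merge both modalities
        return encoding
    return encoding if encoding is not None else audio_features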
| 329
| 1
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def __snake_case ( *lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_=True , lowerCAmelCase_=2 ) -> int:
from .. import __version__
SCREAMING_SNAKE_CASE__ = take_from
SCREAMING_SNAKE_CASE__ = ()
if not isinstance(args[0] , lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowerCAmelCase_ ).base_version ) >= version.parse(lowerCAmelCase_ ):
raise ValueError(
f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
f''' version {__version__} is >= {version_name}''' )
SCREAMING_SNAKE_CASE__ = None
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowerCAmelCase_ ),)
SCREAMING_SNAKE_CASE__ = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
values += (getattr(lowerCAmelCase_ , lowerCAmelCase_ ),)
SCREAMING_SNAKE_CASE__ = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
SCREAMING_SNAKE_CASE__ = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
SCREAMING_SNAKE_CASE__ = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , lowerCAmelCase_ , stacklevel=lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) > 0:
SCREAMING_SNAKE_CASE__ = inspect.getouterframes(inspect.currentframe() )[1]
SCREAMING_SNAKE_CASE__ = call_frame.filename
SCREAMING_SNAKE_CASE__ = call_frame.lineno
SCREAMING_SNAKE_CASE__ = call_frame.function
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(lowerCAmelCase_ ) == 0:
return
elif len(lowerCAmelCase_ ) == 1:
return values[0]
return values
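# Hedged usage sketch for the helper above (exposed as `deprecate` in diffusers,
# with the assumed signature deprecate(*args, take_from=None, standard_warn=True,
# stacklevel=2)). Given an (attribute, removal_version, message) tuple it pops the
# deprecated kwarg from `take_from`, emits a deprecation warning, and returns the
# old value; it raises ValueError once the installed version reaches
# removal_version, and any kwarg left over in `take_from` triggers a TypeError.
def resize(image, size=None, **kwargs):
    legacy_size = deprecate(("target_size", "99.0.0", "Use `size` instead."), take_from=kwargs)
    return size if size is not None else legacy_size

# resize(img, target_size=32) warns and falls back to 32; resize(img, size=32) is silent.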
| 100
|
from ...configuration_utils import PretrainedConfig
lowerCamelCase__ = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class snake_case__ ( lowercase_):
'''simple docstring'''
lowerCamelCase : Tuple = "tapas"
def __init__( self , a__=3_05_22 , a__=7_68 , a__=12 , a__=12 , a__=30_72 , a__="gelu" , a__=0.1 , a__=0.1 , a__=10_24 , a__=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , a__=0.02 , a__=1e-12 , a__=0 , a__=10.0 , a__=0 , a__=1.0 , a__=None , a__=1.0 , a__=False , a__=None , a__=1.0 , a__=1.0 , a__=False , a__=False , a__="ratio" , a__=None , a__=None , a__=64 , a__=32 , a__=False , a__=True , a__=False , a__=False , a__=True , a__=False , a__=None , a__=None , **a__ , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=a__ , **a__ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__snake_case :Optional[Any] = vocab_size
__snake_case :Tuple = hidden_size
__snake_case :Tuple = num_hidden_layers
__snake_case :Any = num_attention_heads
__snake_case :List[str] = hidden_act
__snake_case :List[str] = intermediate_size
__snake_case :int = hidden_dropout_prob
__snake_case :List[Any] = attention_probs_dropout_prob
__snake_case :Any = max_position_embeddings
__snake_case :Optional[int] = type_vocab_sizes
__snake_case :Tuple = initializer_range
__snake_case :Any = layer_norm_eps
# Fine-tuning task hyperparameters
__snake_case :Optional[int] = positive_label_weight
__snake_case :Optional[Any] = num_aggregation_labels
__snake_case :Dict = aggregation_loss_weight
__snake_case :str = use_answer_as_supervision
__snake_case :int = answer_loss_importance
__snake_case :List[Any] = use_normalized_answer_loss
__snake_case :List[str] = huber_loss_delta
__snake_case :Any = temperature
__snake_case :Optional[int] = aggregation_temperature
__snake_case :List[str] = use_gumbel_for_cells
__snake_case :Optional[int] = use_gumbel_for_aggregation
__snake_case :Optional[Any] = average_approximation_function
__snake_case :Union[str, Any] = cell_selection_preference
__snake_case :Tuple = answer_loss_cutoff
__snake_case :Union[str, Any] = max_num_rows
__snake_case :Optional[int] = max_num_columns
__snake_case :str = average_logits_per_cell
__snake_case :Union[str, Any] = select_one_column
__snake_case :Optional[int] = allow_empty_column_selection
__snake_case :Optional[Any] = init_cell_selection_weights_to_zero
__snake_case :Tuple = reset_position_index_per_cell
__snake_case :Any = disable_per_token_loss
# Aggregation hyperparameters
__snake_case :List[str] = aggregation_labels
__snake_case :int = no_aggregation_label_index
if isinstance(self.aggregation_labels , a__ ):
            __snake_case :Any = {int(k): v for k, v in aggregation_labels.items()}
| 455
| 0
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _lowerCamelCase ( A_ : str = "isbn/0140328726" ) -> dict:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] =olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
UpperCamelCase__ : List[str] =f'''{olid} is not a valid Open Library olid'''
raise ValueError(A_ )
return requests.get(f'''https://openlibrary.org/{new_olid}.json''' ).json()
def _lowerCamelCase ( A_ : dict ) -> dict:
'''simple docstring'''
UpperCamelCase__ : Tuple ={
"title": "Title",
"publish_date": "Publish date",
"authors": "Authors",
"number_of_pages": "Number of pages:",
"first_sentence": "First sentence",
"isbn_10": "ISBN (10)",
"isbn_13": "ISBN (13)",
}
UpperCamelCase__ : List[Any] ={better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
UpperCamelCase__ : Any =[
get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
]
UpperCamelCase__ : Optional[Any] =data["First sentence"]["value"]
for key, value in data.items():
if isinstance(A_ , A_ ):
UpperCamelCase__ : List[Any] =", ".join(A_ )
return data
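# Minimal sketch of the lookup performed above: Open Library serves JSON at
# https://openlibrary.org/<olid>.json, where <olid> is e.g. "isbn/0140328726"
# (the default used by the first function). Network access and the endpoint
# shape are assumptions of this sketch.
import requests

data = requests.get("https://openlibrary.org/isbn/0140328726.json", timeout=10).json()
print(data.get("title"), data.get("number_of_pages"))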
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__UpperCAmelCase = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
__UpperCAmelCase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print("""\n""".join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 702
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase__( snake_case__ ):
'''simple docstring'''
snake_case__ = ['''image_processor''', '''tokenizer''']
snake_case__ = '''CLIPImageProcessor'''
snake_case__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] =None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __SCREAMING_SNAKE_CASE , )
UpperCamelCase__ : Dict =kwargs.pop("feature_extractor")
UpperCamelCase__ : Dict =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE) -> int:
"""simple docstring"""
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none.")
if text is not None:
UpperCamelCase__ : Optional[int] =self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
if images is not None:
UpperCamelCase__ : int =self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
if text is not None and images is not None:
UpperCamelCase__ : str =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__SCREAMING_SNAKE_CASE) , tensor_type=__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
@property
def UpperCAmelCase ( self) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Dict =self.tokenizer.model_input_names
UpperCamelCase__ : List[Any] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def UpperCAmelCase ( self) -> List[str]:
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def UpperCAmelCase ( self) -> Any:
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __SCREAMING_SNAKE_CASE , )
return self.image_processor
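# Small sketch of the de-duplication idiom used in model_input_names above:
# dict.fromkeys preserves first-seen order while dropping repeats, unlike set().
tokenizer_names = ["input_ids", "attention_mask"]
image_names = ["pixel_values", "attention_mask"]
print(list(dict.fromkeys(tokenizer_names + image_names)))
# -> ['input_ids', 'attention_mask', 'pixel_values']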
| 582
| 0
|
from __future__ import annotations
from typing import Generic, TypeVar
lowercase_ : Optional[Any] = TypeVar('T')
class _lowerCamelCase ( Generic[T] ):
def __init__( self , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: int= data
SCREAMING_SNAKE_CASE__: Any= self
SCREAMING_SNAKE_CASE__: List[Any]= 0
class _lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# map from node name to the node object
SCREAMING_SNAKE_CASE__: dict[T, DisjointSetTreeNode[T]]= {}
def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
# create a new set with x as its member
SCREAMING_SNAKE_CASE__: List[str]= DisjointSetTreeNode(lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
SCREAMING_SNAKE_CASE__: Dict= self.map[data]
if elem_ref != elem_ref.parent:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.find_set(elem_ref.parent.data )
return elem_ref.parent
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> None:
# helper function for union operation
if nodea.rank > nodea.rank:
SCREAMING_SNAKE_CASE__: Union[str, Any]= nodea
else:
SCREAMING_SNAKE_CASE__: List[str]= nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> None:
# merge 2 disjoint sets
self.link(self.find_set(lowerCAmelCase ) , self.find_set(lowerCAmelCase ) )
class _lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
SCREAMING_SNAKE_CASE__: dict[T, dict[T, int]]= {}
def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
# add a node ONLY if its not present in the graph
if node not in self.connections:
SCREAMING_SNAKE_CASE__: Tuple= {}
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> None:
# add an edge with the given weight
self.add_node(lowerCAmelCase )
self.add_node(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= weight
SCREAMING_SNAKE_CASE__: Dict= weight
def UpperCamelCase_ ( self ) -> GraphUndirectedWeighted[T]:
SCREAMING_SNAKE_CASE__: Any= []
SCREAMING_SNAKE_CASE__: Union[str, Any]= set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x: x[2] )  # order edges by ascending weight
# creating the disjoint set
SCREAMING_SNAKE_CASE__: Union[str, Any]= DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(lowerCAmelCase )
# MST generation
SCREAMING_SNAKE_CASE__: str= 0
SCREAMING_SNAKE_CASE__: List[Any]= 0
SCREAMING_SNAKE_CASE__: str= GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: int= edges[index]
index += 1
SCREAMING_SNAKE_CASE__: Optional[int]= disjoint_set.find_set(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= disjoint_set.find_set(lowerCAmelCase )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
disjoint_set.union(lowerCAmelCase , lowerCAmelCase )
return graph
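# Hedged sketch: the three mangled classes above are, by structure, a
# DisjointSetTreeNode, a DisjointSetTree (find_set with path compression, union by
# rank) and a GraphUndirectedWeighted whose last method is Kruskal's MST. The core
# invariant Kruskal relies on is cheap connectivity queries; a minimal union-find
# showing that invariant (illustrative, standalone):
parent = {v: v for v in "abcd"}

def find(v):
    while parent[v] != v:
        parent[v] = parent[parent[v]]  # path halving keeps trees shallow
        v = parent[v]
    return v

def union(u, v):
    parent[find(u)] = find(v)

union("a", "b"); union("c", "d")
print(find("a") == find("b"), find("a") == find("c"))  # -> True False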
| 64
|
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
SCREAMING_SNAKE_CASE : List[str] = '''bert-base-cased'''
SCREAMING_SNAKE_CASE : Dict = '''google/pegasus-xsum'''
SCREAMING_SNAKE_CASE : int = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SCREAMING_SNAKE_CASE : List[Any] = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
SCREAMING_SNAKE_CASE : str = '''patrickvonplaten/t5-tiny-random'''
SCREAMING_SNAKE_CASE : str = '''sshleifer/bart-tiny-random'''
SCREAMING_SNAKE_CASE : List[str] = '''sshleifer/tiny-mbart'''
SCREAMING_SNAKE_CASE : str = '''sshleifer/tiny-marian-en-de'''
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
A__ = '\n'.join(lowerCAmelCase__ )
Path(lowerCAmelCase__ ).open('w' ).writelines(lowerCAmelCase__ )
def __lowerCamelCase ( lowerCAmelCase__ ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(lowerCAmelCase__ ,f'''{split}.source''' ) ,lowerCAmelCase__ )
_dump_articles(os.path.join(lowerCAmelCase__ ,f'''{split}.target''' ) ,lowerCAmelCase__ )
return tmp_dir
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = AutoTokenizer.from_pretrained(__a )
A__ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
A__ = max(len(tokenizer.encode(__a ) ) for a in ARTICLES )
A__ = max(len(tokenizer.encode(__a ) ) for a in SUMMARIES )
A__ = 4
A__ = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
A__ , A__ = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
A__ = SeqaSeqDataset(
__a , data_dir=__a , type_path='train' , max_source_length=__a , max_target_length=__a , src_lang=__a , tgt_lang=__a , )
A__ = DataLoader(__a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__a , __a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
A__ = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = AutoTokenizer.from_pretrained(__a )
A__ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
A__ = max(len(tokenizer.encode(__a ) ) for a in ARTICLES )
A__ = max(len(tokenizer.encode(__a ) ) for a in SUMMARIES )
A__ = 4
A__ = LegacySeqaSeqDataset(
__a , data_dir=__a , type_path='train' , max_source_length=20 , max_target_length=__a , )
A__ = DataLoader(__a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
A__ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
A__ = tmp_dir.joinpath('train.source' ).open().readlines()
A__ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__a , __a , 128 , __a )
A__ = {x.name for x in tmp_dir.iterdir()}
A__ = {x.name for x in save_dir.iterdir()}
A__ = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__a ) < len(__a )
assert len(__a ) == 1
assert len(packed_examples[0] ) == sum(len(__a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def _UpperCAmelCase ( self ):
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
A__ , A__ , A__ = self._get_dataset(max_len=64 )
A__ = 64
A__ = ds.make_dynamic_sampler(__a , required_batch_size_multiple=__a )
A__ = [len(__a ) for x in batch_sampler]
assert len(set(__a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__a ) == len(__a ) # no dropped or added examples
A__ = DataLoader(__a , batch_sampler=__a , collate_fn=ds.collate_fn , num_workers=2 )
A__ = []
A__ = []
for batch in data_loader:
A__ = batch['input_ids'].shape
A__ = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            A__ = np.prod(batch['input_ids'].shape )
num_src_per_batch.append(__a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__a )
assert num_src_per_batch[0] == max(__a )
if failures:
raise AssertionError(f'''too many tokens in {len(__a )} batches''' )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ , A__ , A__ = self._get_dataset(max_len=512 )
A__ = 2
A__ = ds.make_sortish_sampler(__a , shuffle=__a )
A__ = DataLoader(__a , batch_size=__a , collate_fn=ds.collate_fn , num_workers=2 )
A__ = DataLoader(__a , batch_size=__a , collate_fn=ds.collate_fn , num_workers=2 , sampler=__a )
A__ = tokenizer.pad_token_id
def count_pad_tokens(__a , __a="input_ids" ):
return [batch[k].eq(__a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__a , k='labels' ) ) < sum(count_pad_tokens(__a , k='labels' ) )
assert sum(count_pad_tokens(__a ) ) < sum(count_pad_tokens(__a ) )
assert len(__a ) == len(__a )
def _UpperCAmelCase ( self , __a=1000 , __a=128 ):
"""simple docstring"""
if os.getenv('USE_REAL_DATA' , __a ):
A__ = 'examples/seq2seq/wmt_en_ro'
A__ = max_len * 2 * 64
if not Path(__a ).joinpath('train.len' ).exists():
save_len_file(__a , __a )
else:
A__ = 'examples/seq2seq/test_data/wmt_en_ro'
A__ = max_len * 4
save_len_file(__a , __a )
A__ = AutoTokenizer.from_pretrained(__a )
A__ = SeqaSeqDataset(
__a , data_dir=__a , type_path='train' , max_source_length=__a , max_target_length=__a , n_obs=__a , )
return ds, max_tokens, tokenizer
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ , A__ , A__ = self._get_dataset()
A__ = set(DistributedSortishSampler(__a , 256 , num_replicas=2 , rank=0 , add_extra_examples=__a ) )
A__ = set(DistributedSortishSampler(__a , 256 , num_replicas=2 , rank=1 , add_extra_examples=__a ) )
assert idsa.intersection(__a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = AutoTokenizer.from_pretrained(__a , use_fast=__a )
if tok_name == MBART_TINY:
A__ = SeqaSeqDataset(
__a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
A__ = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
A__ = SeqaSeqDataset(
__a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
A__ = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__a ) == 1 if tok_name == BART_TINY else len(__a ) == 0
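# Conceptual sketch (not the repository's actual sampler) of the dynamic batching
# the tests above verify: group length-sorted examples so each padded batch stays
# under a token budget, letting short sequences share large batches while long
# ones get small ones. All names and the exact cost model are assumptions.
def dynamic_batches(lengths: list[int], max_tokens: int) -> list[list[int]]:
    batches, current = [], []
    for idx in sorted(range(len(lengths)), key=lambda i: lengths[i]):
        # cost if idx joins: the batch is padded to its longest (i.e. newest) member
        if current and (len(current) + 1) * lengths[idx] > max_tokens:
            batches.append(current)
            current = []
        current.append(idx)
    if current:
        batches.append(current)
    return batches

print(dynamic_batches([3, 9, 4, 2], max_tokens=12))  # -> [[3, 0, 2], [1]]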
| 260
| 0
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: Any , a: Optional[Any] , a: Optional[int] ):
__lowerCamelCase : int = params
__lowerCamelCase : List[Any] = np.array(a )
__lowerCamelCase : Any = np.array([len(a ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self: Optional[int] , a: Any ):
return (self.token_ids[index], self.lengths[index])
def __len__( self: Dict ):
return len(self.lengths )
def _snake_case ( self: Optional[int] ):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : List[str] = self.params.max_model_input_size
__lowerCamelCase : str = self.lengths > max_len
logger.info(F'Splitting {sum(a )} too long sequences.' )
def divide_chunks(a: Optional[Any] , a: List[str] ):
return [l[i : i + n] for i in range(0 , len(a ) , a )]
__lowerCamelCase : Union[str, Any] = []
__lowerCamelCase : int = []
if self.params.mlm:
__lowerCamelCase , __lowerCamelCase : Dict = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
__lowerCamelCase , __lowerCamelCase : str = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__lowerCamelCase : Optional[int] = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__lowerCamelCase : Any = np.insert(a , 0 , a )
if sub_s[-1] != sep_id:
__lowerCamelCase : str = np.insert(a , len(a ) , a )
assert len(a ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(a )
new_tok_ids.extend(a )
new_lengths.extend([len(a ) for l in sub_seqs] )
__lowerCamelCase : Tuple = np.array(a )
__lowerCamelCase : List[Any] = np.array(a )
def _snake_case ( self: int ):
__lowerCamelCase : Dict = len(self )
__lowerCamelCase : Union[str, Any] = self.lengths > 11
__lowerCamelCase : Optional[Any] = self.token_ids[indices]
__lowerCamelCase : int = self.lengths[indices]
__lowerCamelCase : List[Any] = len(self )
logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )
def _snake_case ( self: Any ):
if "unk_token" not in self.params.special_tok_ids:
return
else:
__lowerCamelCase : List[Any] = self.params.special_tok_ids['unk_token']
__lowerCamelCase : Any = len(self )
__lowerCamelCase : List[Any] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__lowerCamelCase : Optional[int] = (unk_occs / self.lengths) < 0.5
__lowerCamelCase : Any = self.token_ids[indices]
__lowerCamelCase : int = self.lengths[indices]
__lowerCamelCase : Tuple = len(self )
logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' )
def _snake_case ( self: List[str] ):
if not self.params.is_master:
return
logger.info(F'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _snake_case ( self: List[Any] , a: Dict ):
__lowerCamelCase : Dict = [t[0] for t in batch]
__lowerCamelCase : List[str] = [t[1] for t in batch]
assert len(a ) == len(a )
# Max for paddings
__lowerCamelCase : Tuple = max(a )
# Pad token ids
if self.params.mlm:
__lowerCamelCase : Union[str, Any] = self.params.special_tok_ids['pad_token']
else:
__lowerCamelCase : Tuple = self.params.special_tok_ids['unk_token']
__lowerCamelCase : str = [list(t.astype(a ) ) + [pad_idx] * (max_seq_len_ - len(a )) for t in token_ids]
assert len(tk_ ) == len(a )
assert all(len(a ) == max_seq_len_ for t in tk_ )
__lowerCamelCase : Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_)
__lowerCamelCase : Dict = torch.tensor(a ) # (bs)
return tk_t, lg_t
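# Stand-alone sketch (illustrative names) of the collate method above: pad every
# sequence in the batch to the longest one and return (token, length) tensors.
import torch

def pad_collate(batch, pad_idx=0):
    token_ids = [ids for ids, _ in batch]
    lengths = [length for _, length in batch]
    max_len = max(lengths)
    padded = [list(ids) + [pad_idx] * (max_len - len(ids)) for ids in token_ids]
    return torch.tensor(padded), torch.tensor(lengths)  # (bs, max_len), (bs,)

tk, lg = pad_collate([([5, 6, 7], 3), ([8, 9], 2)])
print(tk.tolist(), lg.tolist())  # -> [[5, 6, 7], [8, 9, 0]] [3, 2]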
| 230
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
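# Conceptual sketch (not transformers' actual _LazyModule implementation) of what
# the pattern above achieves: attributes resolve to real imports only on first
# access, so importing the package stays cheap when the torch/tf/flax submodules
# are never touched.
import importlib

class LazyNamespace:
    def __init__(self, name_to_module: dict[str, str]):
        self._map = name_to_module

    def __getattr__(self, item):
        if item not in self._map:
            raise AttributeError(item)
        module = importlib.import_module(self._map[item])  # deferred import
        return getattr(module, item)

# opt = LazyNamespace({"OPTModel": "transformers.models.opt.modeling_opt"})
# opt.OPTModel  # the torch-backed module is imported only here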
| 230
| 1
|
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
a_ : List[str] = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
a_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def UpperCAmelCase ( A__: Optional[Any] , A__: Tuple=False ) -> Tuple:
__lowerCamelCase , __lowerCamelCase : List[str] = create_model(
'HTSAT-tiny' , 'roberta' , A__ , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=A__ , fusion_type='aff_2d' if enable_fusion else None , )
return model, model_cfg
def UpperCAmelCase ( A__: Tuple ) -> int:
__lowerCamelCase : Any = {}
__lowerCamelCase : str = r'.*sequential.(\d+).*'
__lowerCamelCase : Any = r'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__lowerCamelCase : int = key.replace(A__ , A__ )
if re.match(A__ , A__ ):
# replace sequential layers with list
__lowerCamelCase : List[Any] = re.match(A__ , A__ ).group(1 )
__lowerCamelCase : str = key.replace(f'''sequential.{sequential_layer}.''' , f'''layers.{int(A__ )//3}.linear.''' )
elif re.match(A__ , A__ ):
__lowerCamelCase : int = int(re.match(A__ , A__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
__lowerCamelCase : Dict = 1 if projecton_layer == 0 else 2
__lowerCamelCase : List[str] = key.replace(f'''_projection.{projecton_layer}.''' , f'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
__lowerCamelCase : Dict = value
__lowerCamelCase : str = mixed_qkv.size(0 ) // 3
__lowerCamelCase : List[Any] = mixed_qkv[:qkv_dim]
__lowerCamelCase : Dict = mixed_qkv[qkv_dim : qkv_dim * 2]
__lowerCamelCase : List[Any] = mixed_qkv[qkv_dim * 2 :]
__lowerCamelCase : Optional[Any] = query_layer
__lowerCamelCase : Tuple = key_layer
__lowerCamelCase : Any = value_layer
else:
__lowerCamelCase : str = value
return model_state_dict
def UpperCAmelCase ( A__: str , A__: Dict , A__: Tuple , A__: Union[str, Any]=False ) -> Optional[int]:
__lowerCamelCase , __lowerCamelCase : Optional[int] = init_clap(A__ , enable_fusion=A__ )
clap_model.eval()
__lowerCamelCase : Dict = clap_model.state_dict()
__lowerCamelCase : Optional[int] = rename_state_dict(A__ )
__lowerCamelCase : List[Any] = ClapConfig()
__lowerCamelCase : Dict = enable_fusion
__lowerCamelCase : Any = ClapModel(A__ )
# ignore the spectrogram embedding layer
model.load_state_dict(A__ , strict=A__ )
model.save_pretrained(A__ )
transformers_config.save_pretrained(A__ )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
a_ : Optional[Any] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
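# Small sketch (illustrative shapes) of the fused-QKV split performed in
# rename_state_dict above: fairseq stores query/key/value as one stacked matrix,
# while transformers expects three separate projections of equal size.
import torch

mixed_qkv = torch.arange(18.0).reshape(6, 3)   # stacked (3 * dim, hidden) with dim = 2
dim = mixed_qkv.size(0) // 3
query, key, value = mixed_qkv[:dim], mixed_qkv[dim : 2 * dim], mixed_qkv[2 * dim :]
print(query.shape, key.shape, value.shape)      # -> torch.Size([2, 3]) three times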
| 594
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : Optional[Any] = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class __lowercase( lowercase__ ):
'''simple docstring'''
__a : Optional[int] = 'data2vec-vision'
def __init__( self , __a=768 , __a=12 , __a=12 , __a=3072 , __a="gelu" , __a=0.0 , __a=0.0 , __a=0.02 , __a=1E-12 , __a=224 , __a=16 , __a=3 , __a=False , __a=False , __a=False , __a=False , __a=0.1 , __a=0.1 , __a=True , __a=[3, 5, 7, 11] , __a=[1, 2, 3, 6] , __a=True , __a=0.4 , __a=256 , __a=1 , __a=False , __a=255 , **__a , ):
super().__init__(**__a )
__lowerCamelCase : List[str] = hidden_size
__lowerCamelCase : Optional[Any] = num_hidden_layers
__lowerCamelCase : List[Any] = num_attention_heads
__lowerCamelCase : Any = intermediate_size
__lowerCamelCase : List[str] = hidden_act
__lowerCamelCase : List[Any] = hidden_dropout_prob
__lowerCamelCase : Optional[int] = attention_probs_dropout_prob
__lowerCamelCase : Optional[int] = initializer_range
__lowerCamelCase : List[Any] = layer_norm_eps
__lowerCamelCase : List[str] = image_size
__lowerCamelCase : Dict = patch_size
__lowerCamelCase : Dict = num_channels
__lowerCamelCase : List[str] = use_mask_token
__lowerCamelCase : List[Any] = use_absolute_position_embeddings
__lowerCamelCase : List[str] = use_relative_position_bias
__lowerCamelCase : str = use_shared_relative_position_bias
__lowerCamelCase : List[str] = layer_scale_init_value
__lowerCamelCase : int = drop_path_rate
__lowerCamelCase : Tuple = use_mean_pooling
# decode head attributes (semantic segmentation)
__lowerCamelCase : Union[str, Any] = out_indices
__lowerCamelCase : Optional[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
__lowerCamelCase : str = use_auxiliary_head
__lowerCamelCase : Optional[int] = auxiliary_loss_weight
__lowerCamelCase : List[str] = auxiliary_channels
__lowerCamelCase : List[str] = auxiliary_num_convs
__lowerCamelCase : Optional[Any] = auxiliary_concat_input
__lowerCamelCase : int = semantic_loss_ignore_index
class __lowercase( lowercase__ ):
'''simple docstring'''
__a : Optional[int] = version.parse('1.11' )
@property
def snake_case_ ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def snake_case_ ( self ):
return 1E-4
| 594
| 1
|
class snake_case_ :
'''simple docstring'''
def __init__( self : List[str] ) -> int:
lowerCamelCase_ : Optional[Any] = {}
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> None:
print(self.vertex )
for i in self.vertex:
print(__magic_name__ , " -> " , " -> ".join([str(__magic_name__ ) for j in self.vertex[i]] ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int ) -> None:
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(__magic_name__ )
else:
# else make a new vertex
lowerCamelCase_ : Dict = [to_vertex]
def __SCREAMING_SNAKE_CASE ( self : Any ) -> None:
# visited array for storing already visited nodes
lowerCamelCase_ : Union[str, Any] = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : list ) -> None:
# mark start vertex as visited
lowerCamelCase_ : Dict = True
print(__magic_name__ , end=" " )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
snake_case_ : Optional[Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
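# Equivalent iterative sketch (illustrative, not part of the original class): the
# recursive DFS above can be replaced with an explicit stack, which avoids
# recursion-depth limits on deep graphs.
def dfs_iterative(adjacency: dict, start) -> list:
    visited, order, stack = set(), [], [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # push neighbours in reverse so they pop in insertion order
        stack.extend(reversed(adjacency.get(node, [])))
    return order

print(dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0))  # -> [0, 1, 2, 3]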
| 253
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : List[str] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
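# Example invocation (a sketch: the script filename and paths are illustrative
# assumptions, not defined above):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path /path/to/output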
| 253
| 1
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    r"""
    Constructs a SAM processor which wraps a SAM image processor and handles the
    normalization of 2D prompt points and bounding boxes for the model.
    """

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor) -> None:
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )
        # pop arguments that are not used in the forward pass but are needed nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )
        return encoding_image_processor
    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)
            input_points = np.array(input_points)
        if input_labels is not None:
            input_labels = np.array(input_labels)
        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})
        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        """
        Pads the 2D points and labels to the maximum number of points in the batch.
        """
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False) -> np.ndarray:
        """
        Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format.
        """
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)
        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1, 4)
        return coords
    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
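# Illustrative usage sketch (the `image` variable and the choice of prompt point
# are assumptions, not part of this file):
#
#   from transformers import SamImageProcessor
#
#   processor = SamProcessor(SamImageProcessor())
#   inputs = processor(images=image, input_points=[[[450.0, 600.0]]], return_tensors="pt")
#   # `inputs` now holds pixel_values plus "input_points" rescaled into the
#   # image processor's longest_edge coordinate frame.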
| 323
|
from manim import *
class __magic_name__ ( A__ ):
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> int:
'''simple docstring'''
UpperCAmelCase = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase = [mem.copy() for i in range(6 )]
UpperCAmelCase = [mem.copy() for i in range(6 )]
UpperCAmelCase = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
UpperCAmelCase = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
UpperCAmelCase = VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
UpperCAmelCase = Text("CPU" , font_size=24 )
UpperCAmelCase = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase__ )
UpperCAmelCase = [mem.copy() for i in range(1 )]
UpperCAmelCase = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
UpperCAmelCase = Text("GPU" , font_size=24 )
UpperCAmelCase = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
gpu.align_to(UpperCamelCase__ , UpperCamelCase__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(UpperCamelCase__ )
UpperCAmelCase = [mem.copy() for i in range(6 )]
UpperCAmelCase = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
UpperCAmelCase = Text("Model" , font_size=24 )
UpperCAmelCase = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) , )
UpperCAmelCase = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
UpperCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase__ , run_time=2.5 ) , Write(UpperCamelCase__ ) , Write(UpperCamelCase__ ) )
self.add(UpperCamelCase__ )
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
for i, rect in enumerate(UpperCamelCase__ ):
UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase__ , opacity=0.7 )
cpu_target.move_to(UpperCamelCase__ )
cpu_target.generate_target()
UpperCAmelCase = 0.46 / 4
UpperCAmelCase = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCamelCase__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCamelCase__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCamelCase__ , buff=0.0 )
cpu_targs.append(UpperCamelCase__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCamelCase__ ) )
second_animations.append(MoveToTarget(UpperCamelCase__ , run_time=1.5 ) )
self.play(*UpperCamelCase__ )
self.play(*UpperCamelCase__ )
self.wait()
| 323
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
    _import_structure['image_processing_glpn'] = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_glpn'] = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 220
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
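# Worked example: a 2000x1600 image headed for a "detection" checkpoint has
# current_max_size = 2000, so scale = 800 / 2000 = 0.4 and the image is resized
# to 800x640.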
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info('Converting model...')
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'model.'
    for key in state_dict.copy().keys():
        if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='resnet18',
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: 'table', 1: 'table rotated'}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: 'table',
            1: 'table column',
            2: 'table row',
            3: 'table column header',
            4: 'table projected row header',
            5: 'table spanning cell',
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    image_processor = DetrImageProcessor(
        format='coco_detection', max_size=800 if 'detection' in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
    file_path = hf_hub_download(repo_id='nielsr/example-pdf', repo_type='dataset', filename=filename)
    image = Image.open(file_path).convert('RGB')
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info('Pushing model to the hub...')
        model_name = (
            'microsoft/table-transformer-detection'
            if 'detection' in checkpoint_url
            else 'microsoft/table-transformer-structure-recognition'
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
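# Example invocation (a sketch: the script filename and output path are
# illustrative; the checkpoint URL must be one of the two choices listed above):
#
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path /path/to/output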
| 220
| 1
|
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
BPE_TOKEN_MERGES = """</w>"""
BPE_TOKEN_VOCAB = """@@ """
def get_pairs(word):
    """
    Return the set of symbol pairs in a word. `word` is represented as a tuple of
    symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
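# For example, get_pairs(("l", "o", "w", "</w>")) returns
# {("l", "o"), ("o", "w"), ("w", "</w>")}.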
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""facebook/s2t-wav2vec2-large-en-de""": 1_024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """
    Constructs a Speech2Text2 tokenizer (BPE-based; decoding works without a merges file).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide a `merges.txt` file at instantiation to enable "
                "encoding."
            )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merges_file)
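# Illustrative round trip (a sketch: whether this checkpoint ships a merges.txt
# is an assumption; decoding alone needs only the vocab):
#
#   tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
#   ids = tokenizer("hallo welt").input_ids   # encoding requires merges.txt
#   text = tokenizer.decode(ids)              # decoding works without it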
| 662
|
import re


def dna_complement(dna: str) -> str:
    """
    Return the complementary strand of a DNA sequence.

    >>> dna_complement("GCTA")
    'CGAT'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 662
| 1
|
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """
    Interleave several datasets (sources) into a single dataset, alternating between the
    sources to pick examples, optionally according to `probabilities`.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """
    Converts a list of Dataset (or IterableDataset) objects into a single concatenated dataset.
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
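# Illustrative usage of the two helpers above (the toy datasets are assumptions):
#
#   from datasets import Dataset, concatenate_datasets, interleave_datasets
#
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   interleave_datasets([d1, d2])["a"]    # [0, 10, 1, 11, 2, 12]
#   concatenate_datasets([d1, d2])["a"]   # [0, 1, 2, 10, 11, 12]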
| 702
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)
        # test with a raw waveform
        audio = np.zeros((3_40_00,))
        audio2 = np.zeros((1_40_00,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {'''score''': ANY(float), '''label''': ANY(str)},
                {'''score''': ANY(float), '''label''': ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {'''score''': ANY(float), '''label''': ANY(str)},
            ],
        )
        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''')
        audio = dataset[0]['''audio''']['''array''']
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {'''score''': ANY(float), '''label''': ANY(str)},
                {'''score''': ANY(float), '''label''': ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = '''anton-l/wav2vec2-random-tiny-classifier'''
        audio_classifier = pipeline('''audio-classification''', model=model)
        audio = np.ones((80_00,))
        output = audio_classifier(audio, top_k=4)
        EXPECTED_OUTPUT = [
            {'''score''': 0.0_842, '''label''': '''no'''},
            {'''score''': 0.0_838, '''label''': '''up'''},
            {'''score''': 0.0_837, '''label''': '''go'''},
            {'''score''': 0.0_834, '''label''': '''right'''},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {'''score''': 0.0_845, '''label''': '''stop'''},
            {'''score''': 0.0_844, '''label''': '''on'''},
            {'''score''': 0.0_841, '''label''': '''right'''},
            {'''score''': 0.0_834, '''label''': '''left'''},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
        audio_dict = {'''array''': np.ones((80_00,)), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = '''superb/wav2vec2-base-superb-ks'''
        audio_classifier = pipeline('''audio-classification''', model=model)
        dataset = datasets.load_dataset('''anton-l/superb_dummy''', '''ks''', split='''test''')
        audio = np.array(dataset[3]['''speech'''], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {'''score''': 0.981, '''label''': '''go'''},
                {'''score''': 0.007, '''label''': '''up'''},
                {'''score''': 0.006, '''label''': '''_unknown_'''},
                {'''score''': 0.001, '''label''': '''down'''},
            ],
        )

    @require_tf
    @unittest.skip('''Audio classification is not implemented for TF''')
    def test_large_model_tf(self):
        pass
| 121
| 0
|
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)


def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fpaa_statistics=None):
    """
    A helper function to set a given tensor (parameter or buffer) of a module on a specific
    device (`param.to(device)` is not supported for quantized tensors).
    """
    if "." in tensor_name:
        splits = tensor_name.split('''.''')
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f'{module} has no attribute {split}.')
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.')
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)
    if old_value.device == torch.device('''meta''') and device not in ["meta", torch.device('''meta''')] and value is None:
        raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.')
    if is_buffer or not is_bitsandbytes_available():
        is_abit = False
        is_abit_quantized = False
    else:
        is_abit = hasattr(bnb.nn, '''Params4bit''') and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_abit_quantized = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)
    if is_abit or is_abit_quantized:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to('''cpu''')
                if value.dtype == torch.int8:
                    is_serializable = version.parse(importlib.metadata.version('''bitsandbytes''')) > version.parse(
                        '''0.37.2'''
                    )
                    if not is_serializable:
                        raise ValueError(
                            '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
                            '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''')
            else:
                new_value = torch.tensor(value, device='''cpu''')
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fpaa_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_abit_quantized:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_abit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)
            module._parameters[tensor_name] = new_value
            if fpaa_statistics is not None:
                setattr(module.weight, '''SCB''', fpaa_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """
    Private method that wraps the recursion for module replacement.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '''.'''.join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear( model ,modules_to_not_convert=None ,current_key_name=None ,quantization_config=None ):
    """simple docstring"""
    modules_to_not_convert = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
    model , has_been_replaced = _replace_with_bnb_linear(
        model ,modules_to_not_convert ,current_key_name ,quantization_config )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def replace_8bit_linear( *args ,**kwargs ):
    """simple docstring"""
    warnings.warn(
        '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,FutureWarning ,)
    return replace_with_bnb_linear(*args ,**kwargs )
def set_module_8bit_tensor_to_device( *args ,**kwargs ):
    """simple docstring"""
    warnings.warn(
        '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,FutureWarning ,)
    return set_module_quantized_tensor_to_device(*args ,**kwargs )
def get_keys_to_not_convert( model ):
    """simple docstring"""
    tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params ,dict ):
        tied_keys = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params ,[] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model ,model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove ,'''''' )
        filtered_module_names.append(name )
    return filtered_module_names
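# Illustrative end-to-end sketch (an assumption, not part of the original file):
# quantize a causal LM's linear layers while keeping the tied head in full
# precision. Requires a CUDA-enabled `bitsandbytes` install; the model id is a
# placeholder.
def _demo_replace_with_bnb_linear():
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig
    model = AutoModelForCausalLM.from_pretrained('''gpt2''' )
    keys_to_skip = get_keys_to_not_convert(model )  # typically ['lm_head']
    quant_config = BitsAndBytesConfig(load_in_8bit=True )
    return replace_with_bnb_linear(model ,modules_to_not_convert=keys_to_skip ,quantization_config=quant_config )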
| 34
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 200
| 0
|
def fizz_buzz ( number : int , iterations : int ):
    '''simple docstring'''
    if not isinstance(iterations , int ):
        raise ValueError("""iterations must be defined as integers""" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            """starting number must be an integer and be more than 0""" )
    if not iterations >= 1:
        raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
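def _demo_fizz_buzz():
    # Worked example (added for illustration): each entry carries a trailing
    # space, and multiples of both 3 and 5 yield "FizzBuzz".
    assert fizz_buzz(1 , 15 ) == "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "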
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowerCamelCase_ = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_( state_dict ):
    '''simple docstring'''
    ignore_keys = ["""layers""", """blocks"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
lowerCamelCase_ = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys( s_dict ):
    '''simple docstring'''
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f"{key} -> {new_key}" )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb( emb ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
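# Illustrative check (added; not in the original script): the helper produces a
# bias-free projection that shares storage with the embedding, which is how the
# converted model ties its input and output embeddings.
def _demo_make_linear_from_emb():
    emb = nn.Embedding(10 , 4 )
    proj = make_linear_from_emb(emb )
    assert torch.equal(proj.weight.data , emb.weight.data )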
def _download( url: str , root: str ):
    '''simple docstring'''
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split("""/""" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f"{download_target} exists and is not a regular file" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , """rb""" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
    with urllib.request.urlopen(url ) as source, open(download_target , """wb""" ) as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=True , unit_divisor=1_024 ) as loop:
            while True:
                buffer = source.read(8_192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , """rb""" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.""" )
    return model_bytes
def convert_openai_whisper_to_tfms( checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    dimensions = original_checkpoint["""dims"""]
    state_dict = original_checkpoint["""model_state_dict"""]
    proj_out_weights = state_dict["""decoder.token_embedding.weight"""]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            f" but all the following weights are missing {missing}" )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCamelCase_ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
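# Example invocation (illustrative; the script filename is an assumption):
#   python convert_openai_whisper_to_transformers.py --checkpoint_path tiny \
#       --pytorch_dump_folder_path ./whisper-tiny
# `--checkpoint_path` accepts either a key of `_MODELS` above or a local `.pt` file.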
| 86
| 0
|
"""simple docstring"""
def average_absolute_deviation( nums: list[int] ):
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("""List is empty""" )
    average = sum(nums ) / len(nums )  # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
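def _demo_average_absolute_deviation():
    # Worked example (added for illustration): for [2, 4, 6, 8] the average is 5,
    # the absolute deviations are [3, 1, 1, 3], and their mean is 2.0.
    assert average_absolute_deviation([2, 4, 6, 8] ) == 2.0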
if __name__ == "__main__":
import doctest
doctest.testmod()
| 281
|
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
FRAMEWORK = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class __snake_case (lowerCamelCase , unittest.TestCase ):
__a = CamembertTokenizer
__a = CamembertTokenizerFast
__a = True
__a = True
def __a ( self: List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self: Union[str, Any] ):
__lowerCamelCase = """<pad>"""
__lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def __a ( self: Any ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A_ ) , 10_04 )
def __a ( self: Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
def __a ( self: Optional[Any] ):
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__lowerCamelCase = """I was born in 92000, and this is falsé."""
__lowerCamelCase = tokenizer.encode(A_ )
__lowerCamelCase = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
__lowerCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
__lowerCamelCase = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__lowerCamelCase = tokenizer.convert_ids_to_tokens(A_ )
__lowerCamelCase = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
def __a ( self: List[str] ):
if not self.test_rust_tokenizer:
return
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = """I was born in 92000, and this is falsé."""
__lowerCamelCase = tokenizer.tokenize(A_ )
__lowerCamelCase = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
__lowerCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
__lowerCamelCase = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = tokenizer.encode(A_ )
__lowerCamelCase = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
@slow
def __a ( self: Optional[Any] ):
# fmt: off
__lowerCamelCase = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
__lowerCamelCase = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=A_ , )
| 281
| 1
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCAmelCase_ = TypeVar("""KEY""")
UpperCAmelCase_ = TypeVar("""VAL""")
@dataclass(frozen=True , slots=True )
class _Item( Generic[KEY, VAL] ):
    '''simple docstring'''
    key: KEY
    val: VAL
class _DeletedItem( _Item ):
    '''simple docstring'''
    def __init__( self ) -> None:
        """simple docstring"""
        super().__init__(None , None )
def __bool__( self ) -> bool:
"""simple docstring"""
return False
_deleted = _DeletedItem()
class HashMap( MutableMapping[KEY, VAL] ):
    '''Hash map with open addressing.'''
    def __init__( self , initial_block_size: int = 8 , capacity_factor: float = 0.75 ) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index( self , key: KEY ) -> int:
        return hash(key ) % len(self._buckets )
    def _get_next_ind( self , ind: int ) -> int:
        return (ind + 1) % len(self._buckets )
    def _try_set( self , ind: int , key: KEY , val: VAL ) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False
    def _is_full( self ) -> bool:
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
    def _is_sparse( self ) -> bool:
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit
    def _resize( self , new_size: int ) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )
    def _size_up( self ) -> None:
        self._resize(len(self._buckets ) * 2 )
    def _size_down( self ) -> None:
        self._resize(len(self._buckets ) // 2 )
    def _iterate_buckets( self , key: KEY ) -> Iterator[int]:
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )
    def _add_item( self , key: KEY , val: VAL ) -> None:
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break
    def __setitem__( self , key: KEY , val: VAL ) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key , val )
    def __delitem__( self , key: KEY ) -> None:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__( self , key: KEY ) -> VAL:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )
    def __len__( self ) -> int:
        return self._len
    def __iter__( self ) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)
    def __repr__( self ) -> str:
        val_string = """ ,""".join(
            f"""{item.key}: {item.val}""" for item in self._buckets if item )
        return f"""HashMap({val_string})"""
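def _demo_hash_map():
    # Minimal usage sketch (added; not part of the original module): the map
    # implements the full MutableMapping protocol, with deletions handled via
    # the `_deleted` sentinel and automatic up/down resizing.
    hm = HashMap(initial_block_size=8 )
    hm["one"] = 1
    hm["two"] = 2
    assert hm["one"] == 1
    del hm["one"]
    assert len(hm ) == 1
    assert list(hm ) == ["two"]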
| 436
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ) -> Dict:
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("""test""" )
    else:
        parser = argparse.ArgumentParser("""Accelerate test command""" )
    parser.add_argument(
        """--config_file""" , default=None , help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command( args ) -> str:
    """simple docstring"""
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"""--config_file={args.config_file} {script_name}"""
    cmd = ["""accelerate-launch"""] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print("""Test is a success! You are ready for your distributed training!""" )
def main( ) -> List[Any]:
    """simple docstring"""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
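# Typical invocation (illustrative): the parser above backs the `accelerate test`
# CLI subcommand, e.g.
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml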
if __name__ == "__main__":
main()
| 436
| 1
|
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet( hor ):
    """simple docstring"""
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
    state_dict = model.state_dict()
    config = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 65536,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
    hf_value_function = UNet1DModel(**config )
    print(F'''length of state dict: {len(state_dict.keys() )}''' )
    print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
    with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , "w" ) as f:
        json.dump(config , f )
def value_function( ):
    """simple docstring"""
    config = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 128, 256),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 65536,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
    state_dict = model
    hf_value_function = UNet1DModel(**config )
    print(F'''length of state dict: {len(state_dict.keys() )}''' )
    print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
    with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
        json.dump(config , f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 62
|
def xnor_gate( input_1 , input_2 ):
    """simple docstring"""
    return 1 if input_1 == input_2 else 0
def test_xnor_gate( ):
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
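# Derivation note (added for illustration): XNOR is the negation of XOR, so the
# gate can equivalently be written with the built-in bitwise operator.
def _xnor_via_xor( input_1 , input_2 ):
    return 1 - (input_1 ^ input_2 )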
| 62
| 1
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class lowerCamelCase_ ( unittest.TestCase ):
def __magic_name__ ( self ):
a_ = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
a_ = Vector()
def __magic_name__ ( self ):
a_ = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(_SCREAMING_SNAKE_CASE ) , """(0,0,0,0,0,1)""" )
def __magic_name__ ( self ):
a_ = Vector([1, 2, 3, 4] )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 4 )
def __magic_name__ ( self ):
a_ = Vector([1, 2] )
a_ = Vector([1, 2, 3, 4, 5] )
a_ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
a_ = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def __magic_name__ ( self ):
a_ = Vector([1, 2, 3] )
a_ = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __magic_name__ ( self ):
a_ = Vector([1, 2, 3] )
a_ = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __magic_name__ ( self ):
a_ = Vector([1, 2, 3] )
a_ = Vector([2, -1, 4] ) # for test of dot product
a_ = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def __magic_name__ ( self ):
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def __magic_name__ ( self ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def __magic_name__ ( self ):
a_ = Vector([1, 2, 3] )
a_ = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) , """(3,4,7)""" )
def __magic_name__ ( self ):
a_ = Vector([1, 0, 0, 0, 0, 0] )
a_ = x.copy()
self.assertEqual(str(_SCREAMING_SNAKE_CASE ) , str(_SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( self ):
a_ = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(_SCREAMING_SNAKE_CASE ) , """(0,1,0)""" )
def __magic_name__ ( self ):
a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(_SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( self ):
a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a_ = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( self ):
a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a_ = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( self ):
a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __magic_name__ ( self ):
a_ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
a_ = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def __magic_name__ ( self ):
a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(_SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( self ):
a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def __magic_name__ ( self ):
a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def __magic_name__ ( self ):
a_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def __magic_name__ ( self ):
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 717
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args( ):
    """simple docstring"""
    parser = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=UpperCamelCase , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=UpperCamelCase , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=UpperCamelCase )
return parser.parse_args()
def main( ):
    """simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
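# Example invocation (illustrative; the script filename is an assumption):
#   python xla_spawn.py --num_cores 8 path/to/train_script.py --script-args ...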
| 403
| 0
|
"""simple docstring"""
def counting_sort( collection ):
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string( string ):
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
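def _demo_counting_sort():
    # Illustrative check (added): the offset by `coll_min` lets the sort handle
    # negative values, and the reversed final pass keeps it stable.
    assert counting_sort([-3, 5, -3, 0] ) == [-3, -3, 0, 5]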
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
__UpperCamelCase : Optional[Any] = input('''Enter numbers separated by a comma:\n''').strip()
__UpperCamelCase : str = [int(item) for item in user_input.split(''',''')]
print(counting_sort(unsorted))
| 450
|
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
# Map ONNX Runtime tensor type strings to NumPy dtypes.
ORT_TO_NP_TYPE = {
    '''tensor(bool)''': np.bool_,
    '''tensor(int8)''': np.int8,
    '''tensor(uint8)''': np.uint8,
    '''tensor(int16)''': np.int16,
    '''tensor(uint16)''': np.uint16,
    '''tensor(int32)''': np.int32,
    '''tensor(uint32)''': np.uint32,
    '''tensor(int64)''': np.int64,
    '''tensor(uint64)''': np.uint64,
    '''tensor(float16)''': np.float16,
    '''tensor(float)''': np.float32,
    '''tensor(double)''': np.float64,
}
class OnnxRuntimeModel :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowercase_ : Union[str, Any]=None ,**lowercase_ : List[str] ):
logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' )
        self.model = model
        self.model_save_dir = kwargs.get('''model_save_dir''' ,None )
        self.latest_model_name = kwargs.get('''latest_model_name''' ,ONNX_WEIGHTS_NAME )
    def __call__( self ,**kwargs ):
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None ,inputs )
@staticmethod
def __lowerCAmelCase ( lowercase_ : Union[str, Path] ,lowercase_ : str=None ,lowercase_ : str=None ):
if provider is None:
logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' )
lowerCAmelCase__ : Optional[Any] = '''CPUExecutionProvider'''
return ort.InferenceSession(lowercase_ ,providers=[provider] ,sess_options=lowercase_ )
def __lowerCAmelCase ( self : Dict ,lowercase_ : Union[str, Path] ,lowercase_ : Optional[str] = None ,**lowercase_ : str ):
lowerCAmelCase__ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME
lowerCAmelCase__ : Tuple = self.model_save_dir.joinpath(self.latest_model_name )
lowerCAmelCase__ : List[Any] = Path(lowercase_ ).joinpath(lowercase_ )
try:
shutil.copyfile(lowercase_ ,lowercase_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
lowerCAmelCase__ : Dict = self.model_save_dir.joinpath(lowercase_ )
if src_path.exists():
lowerCAmelCase__ : Union[str, Any] = Path(lowercase_ ).joinpath(lowercase_ )
try:
shutil.copyfile(lowercase_ ,lowercase_ )
except shutil.SameFileError:
pass
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Union[str, os.PathLike] ,**lowercase_ : Union[str, Any] ,):
if os.path.isfile(lowercase_ ):
logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
# saving model weights/files
self._save_pretrained(lowercase_ ,**lowercase_ )
@classmethod
def __lowerCAmelCase ( cls : Dict ,lowercase_ : Union[str, Path] ,lowercase_ : Optional[Union[bool, str, None]] = None ,lowercase_ : Optional[Union[str, None]] = None ,lowercase_ : bool = False ,lowercase_ : Optional[str] = None ,lowercase_ : Optional[str] = None ,lowercase_ : Optional[str] = None ,lowercase_ : Optional["ort.SessionOptions"] = None ,**lowercase_ : Optional[int] ,):
lowerCAmelCase__ : Optional[Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowercase_ ):
lowerCAmelCase__ : str = OnnxRuntimeModel.load_model(
os.path.join(lowercase_ ,lowercase_ ) ,provider=lowercase_ ,sess_options=lowercase_ )
lowerCAmelCase__ : Any = Path(lowercase_ )
# load model from hub
else:
# download model
lowerCAmelCase__ : Optional[Any] = hf_hub_download(
repo_id=lowercase_ ,filename=lowercase_ ,use_auth_token=lowercase_ ,revision=lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,)
lowerCAmelCase__ : str = Path(lowercase_ ).parent
lowerCAmelCase__ : Union[str, Any] = Path(lowercase_ ).name
lowerCAmelCase__ : Optional[Any] = OnnxRuntimeModel.load_model(lowercase_ ,provider=lowercase_ ,sess_options=lowercase_ )
return cls(model=lowercase_ ,**lowercase_ )
@classmethod
def __lowerCAmelCase ( cls : Any ,lowercase_ : Union[str, Path] ,lowercase_ : bool = True ,lowercase_ : Optional[str] = None ,lowercase_ : Optional[str] = None ,**lowercase_ : Any ,):
lowerCAmelCase__ : Union[str, Any] = None
if len(str(lowercase_ ).split('''@''' ) ) == 2:
lowerCAmelCase__ ,lowerCAmelCase__ : str = model_id.split('''@''' )
return cls._from_pretrained(
model_id=lowercase_ ,revision=lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,use_auth_token=lowercase_ ,**lowercase_ ,)
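# Minimal usage sketch (an assumption, not part of the original module): load an
# exported ONNX component from a local folder and call it with NumPy inputs. The
# path and input name below are placeholders.
#
#   unet = OnnxRuntimeModel.from_pretrained('''./onnx-pipeline/unet''' ,provider='''CPUExecutionProvider''' )
#   out = unet(sample=np.zeros((1, 4, 64, 64) ,dtype=np.float32 ) )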
| 450
| 1
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A ( lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = TextToVideoSDPipeline
_SCREAMING_SNAKE_CASE : Optional[int] = TEXT_TO_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
_SCREAMING_SNAKE_CASE : Union[str, Any] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def lowercase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCamelCase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , )
torch.manual_seed(0 )
UpperCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
UpperCamelCase_ = CLIPTextModel(__UpperCAmelCase )
UpperCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def lowercase__ ( self : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : str=0 ) -> str:
"""simple docstring"""
if str(__UpperCAmelCase ).startswith('mps' ):
UpperCamelCase_ = torch.manual_seed(__UpperCAmelCase )
else:
UpperCamelCase_ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
UpperCamelCase_ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = TextToVideoSDPipeline(**__UpperCAmelCase )
UpperCamelCase_ = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCamelCase_ = self.get_dummy_inputs(__UpperCAmelCase )
UpperCamelCase_ = 'np'
UpperCamelCase_ = sd_pipe(**__UpperCAmelCase ).frames
UpperCamelCase_ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
UpperCamelCase_ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__UpperCAmelCase , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__UpperCAmelCase , expected_max_diff=1E-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def lowercase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def lowercase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def lowercase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class A ( unittest.TestCase ):
def lowercase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
UpperCamelCase_ = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
UpperCamelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCamelCase_ = pipe.to('cuda' )
UpperCamelCase_ = 'Spiderman is surfing'
UpperCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase_ = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=25 , output_type='pt' ).frames
UpperCamelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def lowercase__ ( self : List[Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
UpperCamelCase_ = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
UpperCamelCase_ = pipe.to('cuda' )
UpperCamelCase_ = 'Spiderman is surfing'
UpperCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase_ = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type='pt' ).frames
UpperCamelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 559
|
from sklearn.metrics import mean_squared_error
import datasets
__a : Union[str, Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
__a : Dict = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
__a : Any = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def lowercase__ ( self : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Union[str, Any]="uniform_average" , __UpperCAmelCase : List[Any]=True ) -> int:
"""simple docstring"""
UpperCamelCase_ = mean_squared_error(
__UpperCAmelCase , __UpperCAmelCase , sample_weight=__UpperCAmelCase , multioutput=__UpperCAmelCase , squared=__UpperCAmelCase )
return {"mse": mse}
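# Cross-check (added for illustration): the metric simply wraps sklearn's
# mean_squared_error, so the {'mse': 0.375} example above is
# ((3-2.5)**2 + (-0.5-0)**2 + (2-2)**2 + (7-8)**2) / 4 = 1.5 / 4 = 0.375.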
| 559
| 1
|
import math
def sieve( n ):
    '''simple docstring'''
    prime = []
    start = 2
    end = int(math.sqrt(n ) )  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , n )
    return prime
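def _demo_sieve():
    # Illustrative check (added): the segmented sieve agrees with the primes
    # below 30.
    assert sieve(30 ) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]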
print(sieve(10**6))
| 80
|
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest( nn.Module ):
    """simple docstring"""
    def __init__( self ):
        """simple docstring"""
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        """simple docstring"""
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase__ , model.state_dict() )
UpperCamelCase = os.path.join(UpperCamelCase__ , 'index.json' )
self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
UpperCamelCase = os.path.join(UpperCamelCase__ , f"""{key}.dat""" )
self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
# TODO: add tests on the fact weights are properly loaded
def A ( self : str ):
"""simple docstring"""
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2 , 3 , dtype=dtype )
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight , 'weight' , tmp_dir , {} )
                weight_file = os.path.join(tmp_dir , 'weight.dat' )
                self.assertTrue(os.path.isfile(weight_file ) )
                self.assertDictEqual(index , {'weight': {'shape': [2, 3], 'dtype': str(dtype ).split('.' )[1]}} )
                new_weight = load_offloaded_weight(weight_file , index['weight'] )
                self.assertTrue(torch.equal(weight , new_weight ) )
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = ModelForTest()
UpperCamelCase = model.state_dict()
UpperCamelCase = {k: v for k, v in state_dict.items() if 'linear2' not in k}
UpperCamelCase = {k: v for k, v in state_dict.items() if 'linear2' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = OffloadedWeightsLoader(state_dict=UpperCamelCase__ , save_folder=UpperCamelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase__ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase__ , weight_map[key] ) )
UpperCamelCase = {k: v for k, v in state_dict.items() if 'weight' in k}
UpperCamelCase = {k: v for k, v in state_dict.items() if 'weight' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = OffloadedWeightsLoader(state_dict=UpperCamelCase__ , save_folder=UpperCamelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase__ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase__ , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase__ , UpperCamelCase__ )
# Duplicates are removed
UpperCamelCase = OffloadedWeightsLoader(state_dict=UpperCamelCase__ , save_folder=UpperCamelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase__ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase__ , weight_map[key] ) )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = {'a.1': 0, 'a.10': 1, 'a.2': 2}
UpperCamelCase = extract_submodules_state_dict(UpperCamelCase__ , ['a.1', 'a.2'] )
self.assertDictEqual(UpperCamelCase__ , {'a.1': 0, 'a.2': 2} )
UpperCamelCase = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
UpperCamelCase = extract_submodules_state_dict(UpperCamelCase__ , ['a.1', 'a.2'] )
self.assertDictEqual(UpperCamelCase__ , {'a.1.a': 0, 'a.2.a': 2} )
| 430
| 0
|
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
_snake_case: Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
_snake_case: int = 'A painting of a squirrel eating a burger'
_snake_case: Optional[Any] = jax.device_count()
_snake_case: Dict = num_samples * [prompt]
_snake_case: int = sd_pipe.prepare_inputs(__snake_case )
_snake_case: Optional[Any] = replicate(__snake_case )
_snake_case: Dict = shard(__snake_case )
_snake_case: Any = jax.random.PRNGKey(0 )
_snake_case: Any = jax.random.split(__snake_case , jax.device_count() )
_snake_case: Optional[Any] = sd_pipe(__snake_case , __snake_case , __snake_case , num_inference_steps=25 , jit=__snake_case )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
_snake_case: Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case: Dict = images[0, 2_53:2_56, 2_53:2_56, -1]
_snake_case: List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case: List[str] = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
    def test_stable_diffusion_dpm_flax( self ):
        '''simple docstring'''
        model_id = 'stabilityai/stable-diffusion-2'
        scheduler , scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id , subfolder='scheduler' )
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , revision='bf16' , dtype=jnp.bfloat16 , )
        params['scheduler'] = scheduler_params
        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 703
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
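# Processor that bundles a Speech2Text feature extractor and tokenizer behind
# a single `__call__`: `audio` inputs are routed to the feature extractor and
# `text` inputs to the tokenizer.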
class lowerCamelCase ( ProcessorMixin ):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"
    def __init__( self , feature_extractor , tokenizer ):
        '''simple docstring'''
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
'''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
            audio = kwargs.pop('raw_speech' )
        else:
            audio = kwargs.pop('audio' , None )
        sampling_rate = kwargs.pop('sampling_rate' , None )
        text = kwargs.pop('text' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
@contextmanager
    def as_target_processor( self ):
        '''simple docstring'''
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 273
| 0
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
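# Import the real pipelines only when torch and a recent enough transformers
# are available; otherwise fall back to dummy placeholder objects so that
# importing this package never fails outright.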
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 396
|
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
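# Decorator factory: wraps a benchmarked callable so it runs either eagerly or
# as a compiled tf.function (optionally with XLA), depending on the arguments.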
def run_with_tf_optimizations( do_eager_mode : bool , use_xla : bool ):
    def run_func( func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
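# Builds a (batch_size, sequence_length) tensor of random token ids in
# [0, vocab_size), used as dummy input for the speed/memory benchmarks.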
def random_input_ids( batch_size : int , sequence_length : int , vocab_size : int ):
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class _lowercase( Benchmark ):
    """simple docstring"""
    args : TensorFlowBenchmarkArguments
    configs : PretrainedConfig
    framework : str = "TensorFlow"
    @property
    def framework_version( self ):
        return tf.__version__
    def _inference_speed( self , model_name : str , batch_size : int , sequence_length : int ):
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_inference )
    def _train_speed( self , model_name : str , batch_size : int , sequence_length : int ):
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_train )
    def _inference_memory( self , model_name : str , batch_size : int , sequence_length : int ):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_inference )
    def _train_memory( self , model_name : str , batch_size : int , sequence_length : int ):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_train )
    def _prepare_inference_func( self , model_name : str , batch_size : int , sequence_length : int ):
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError('Mixed precision is currently not supported.' )
        has_model_class_in_config = (
            hasattr(config , 'architectures' )
            and isinstance(config.architectures , list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
                transformers_module = __import__('transformers' ,fromlist=[model_class] )
                model_cls = getattr(transformers_module , model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    ' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config ,'vocab_size' ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
        def encoder_decoder_forward():
            return model(input_ids ,decoder_input_ids=input_ids ,training=False )
        @run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
        def encoder_forward():
            return model(input_ids ,training=False )
        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func( self , model_name : str , batch_size : int , sequence_length : int ):
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )
        if self.args.fp16:
            raise NotImplementedError('Mixed precision is currently not supported.' )
        has_model_class_in_config = (
            hasattr(config , 'architectures' )
            and isinstance(config.architectures , list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
                transformers_module = __import__('transformers' ,fromlist=[model_class] )
                model_cls = getattr(transformers_module , model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    ' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config ,'vocab_size' ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
        def encoder_decoder_train():
            loss = model(input_ids ,decoder_input_ids=input_ids ,labels=input_ids ,training=True )[0]
            gradients = tf.gradients(loss ,model.trainable_variables )
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
        def encoder_train():
            loss = model(input_ids ,labels=input_ids ,training=True )[0]
            gradients = tf.gradients(loss ,model.trainable_variables )
            return gradients
        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed( self , func ):
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
                    timeit.repeat(func ,repeat=1 ,number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func ,repeat=self.args.repeat ,number=10 ,)
                return min(runtimes ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"""Doesn't fit on GPU. {e}""" )
    def _measure_memory( self , func : Callable[[], None] ):
        logger.info(
            'Note that TensorFlow allocates more memory than '
            'it might need to speed up computation. '
            'The memory reported here corresponds to the memory '
            'reported by `nvidia-smi`, which can vary depending '
            'on total available memory on the GPU that is used.' )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            '`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
                            ' consumption line by line.' )
                    trace = start_memory_tracing('transformers' )
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
                        ' with `args.memory=False`' )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            'py3nvml not installed, we won\'t log GPU memory usage. '
                            'Install py3nvml (pip install py3nvml) to log information about GPU.' )
                        memory = 'N/A'
                    else:
                        logger.info(
                            'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
                            ' running on the same GPU.' )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle )
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
                            ' TensorFlow.' )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func )
                        memory = Memory(memory_bytes ) if isinstance(memory_bytes , int ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace )
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"""Doesn't fit on GPU. {e}""" )
                return "N/A", None
| 396
| 1
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Dict = logging.get_logger(__name__)
PATTERNS = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
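# Translates a ParlAI state-dict key into its Hugging Face equivalent using
# the substring patterns above plus per-layer norm renames.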
def rename_state_dict_key( k ):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith('encoder' ):
        k = k.replace('.attn' , '.self_attn' )
        k = k.replace('norm1' , 'self_attn_layer_norm' )
        k = k.replace('norm2' , 'final_layer_norm' )
    elif k.startswith('decoder' ):
        k = k.replace('norm1' , 'self_attn_layer_norm' )
        k = k.replace('norm2' , 'encoder_attn_layer_norm' )
        k = k.replace('norm3' , 'final_layer_norm' )
    return k
def rename_layernorm_keys( sd ):
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace('layernorm_embedding' , 'layer_norm' )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["""START"""]
@torch.no_grad()
def convert_parlai_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    model = torch.load(checkpoint_path , map_location='cpu' )
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
_lowerCamelCase : int = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 308
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
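# Multiple-choice accuracy: argmax over the model outputs compared to labels.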
def accuracy( out , labels ):
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset( dataset_path ):
    with open(dataset_path , encoding='utf_8' ) as f:
        f = csv.reader(f )
        output = []
        next(f ) # skip the first line
        for line in tqdm(f ):
            output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
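# Packs each encoded (story, continuation_1, continuation_2, label) tuple into
# padded input_ids / mc_token_ids / lm_labels / mc_labels arrays for the
# double-heads model.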
def pre_process_datasets( encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name' , type=str , default='openai-gpt' , help='pretrained model name' )
    parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
    parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
    parser.add_argument(
        '--output_dir' , default=None , type=str , required=True , help='The output directory where the model predictions and checkpoints will be written.' , )
    parser.add_argument('--train_dataset' , type=str , default='' )
    parser.add_argument('--eval_dataset' , type=str , default='' )
    parser.add_argument('--seed' , type=int , default=42 )
    parser.add_argument('--num_train_epochs' , type=int , default=3 )
    parser.add_argument('--train_batch_size' , type=int , default=8 )
    parser.add_argument('--eval_batch_size' , type=int , default=16 )
    parser.add_argument('--adam_epsilon' , default=1E-8 , type=float , help='Epsilon for Adam optimizer.' )
    parser.add_argument('--max_grad_norm' , type=int , default=1 )
    parser.add_argument(
        '--max_steps' , default=-1 , type=int , help=(
            'If > 0: set total number of training steps to perform. Override num_train_epochs.'
        ) , )
    parser.add_argument(
        '--gradient_accumulation_steps' , type=int , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
    parser.add_argument('--learning_rate' , type=float , default=6.25E-5 )
    parser.add_argument('--warmup_steps' , default=0 , type=int , help='Linear warmup over warmup_steps.' )
    parser.add_argument('--lr_schedule' , type=str , default='warmup_linear' )
    parser.add_argument('--weight_decay' , type=float , default=0.01 )
    parser.add_argument('--lm_coef' , type=float , default=0.9 )
    parser.add_argument('--n_valid' , type=int , default=374 )
    parser.add_argument('--server_ip' , type=str , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=str , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
    print(args )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    n_gpu = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(device , n_gpu ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(special_tokens )
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens )
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(tokenizer ) )
    model.to(device )
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info('Encoding dataset...' )
    train_dataset = load_rocstories_dataset(args.train_dataset )
    eval_dataset = load_rocstories_dataset(args.eval_dataset )
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets )
    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ) , len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
    input_length = min(input_length , model.config.n_positions ) # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets , input_length , max_length , *special_tokens_ids )
    train_tensor_dataset , eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    train_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.train_batch_size )
    eval_data = TensorDataset(*eval_tensor_dataset )
    eval_sampler = SequentialSampler(eval_data )
    eval_dataloader = DataLoader(eval_data , sampler=eval_sampler , batch_size=args.eval_batch_size )
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader ) // args.gradient_accumulation_steps ) + 1
        else:
            t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters() )
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters , lr=args.learning_rate , eps=args.adam_epsilon )
        scheduler = get_linear_schedule_with_warmup(
            optimizer , num_warmup_steps=args.warmup_steps , num_training_steps=t_total )
    if args.do_train:
        nb_tr_steps , tr_loss , exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader , desc='Training' )
            for step, batch in enumerate(tqdm_bar ):
                batch = tuple(t.to(device ) for t in batch )
                input_ids , mc_token_ids , lm_labels , mc_labels = batch
                losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss , scheduler.get_lr()[0] )
# Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model , 'module' ) else model # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME )
        output_config_file = os.path.join(args.output_dir , CONFIG_NAME )
        torch.save(model_to_save.state_dict() , output_model_file )
        model_to_save.config.to_json_file(output_config_file )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(device )
    if args.do_eval:
        model.eval()
        eval_loss , eval_accuracy = 0, 0
        nb_eval_steps , nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader , desc='Evaluating' ):
            batch = tuple(t.to(device ) for t in batch )
            input_ids , mc_token_ids , lm_labels , mc_labels = batch
            with torch.no_grad():
                _ , mc_loss , _ , mc_logits = model(
                    input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu' ).numpy()
            tmp_eval_accuracy = accuracy(mc_logits , mc_labels )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
        output_eval_file = os.path.join(args.output_dir , 'eval_results.txt' )
        with open(output_eval_file , 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key in sorted(result.keys() ):
                logger.info('  %s = %s' , key , str(result[key] ) )
                writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 308
| 1
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
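# Lints the repository's file paths: reports files with uppercase characters,
# spaces, hyphens, or no parent directory, and exits non-zero if any exist.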
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'''{len(upper_files)} files contain uppercase characters:''')
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if """ """ in file]
if space_files:
print(f'''{len(space_files)} files contain space characters:''')
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(f'''{len(hyphen_files)} files contain hyphen characters:''')
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'''{len(nodir_files)} files are not in a directory:''')
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 226
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
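# Fairseq language-code ids used by the PLBart base checkpoint under test.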
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase : Dict = PLBartTokenizer(snake_case ,language_codes="""base""" ,keep_accents=snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = PLBartTokenizer(snake_case ,language_codes="""base""" ,keep_accents=snake_case )
lowercase : List[str] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(snake_case ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
snake_case ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
lowercase : List[Any] = tokenizer.convert_tokens_to_ids(snake_case )
self.assertListEqual(
snake_case ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
lowercase : Any = tokenizer.convert_ids_to_tokens(snake_case )
self.assertListEqual(
snake_case ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
lowercase : List[Any] = tokenizer.vocab_size
lowercase : Union[str, Any] = [tokenizer.convert_ids_to_tokens(snake_case ) for x in range(end - 4 ,snake_case )]
self.assertListEqual(snake_case ,["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
lowercase : Optional[int] = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
lowercase : Tuple = tokenizer(snake_case ).input_ids
self.assertEqual(
tokenizer.decode(snake_case ,skip_special_tokens=snake_case ,clean_up_tokenization_spaces=snake_case ) ,snake_case ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = PLBartTokenizer(snake_case ,language_codes="""multi""" ,keep_accents=snake_case )
lowercase : str = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(snake_case ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
snake_case ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
lowercase : Tuple = tokenizer.convert_tokens_to_ids(snake_case )
self.assertListEqual(
snake_case ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(snake_case )
self.assertListEqual(
snake_case ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
lowercase : List[Any] = tokenizer.vocab_size
lowercase : int = [tokenizer.convert_ids_to_tokens(snake_case ) for x in range(end - 7 ,snake_case )]
self.assertListEqual(
snake_case ,["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
lowercase : int = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
lowercase : Optional[Any] = tokenizer(snake_case ).input_ids
self.assertEqual(
tokenizer.decode(snake_case ,skip_special_tokens=snake_case ,clean_up_tokenization_spaces=snake_case ) ,snake_case ,)
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
_a : str= "uclanlp/plbart-python-en_XX"
_a : Union[str, Any]= [
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
    tgt_text = [
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
    expected_src_tokens = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls.tokenizer : PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name ,language_codes="""base""" ,src_lang="""python""" ,tgt_lang="""en_XX""" )
        cls.pad_token_id = 1
return cls
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] ,50001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] ,50002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] ,50003 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.assertIn(snake_case ,self.tokenizer.all_special_ids )
lowercase : List[str] = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
lowercase : int = self.tokenizer.decode(snake_case ,skip_special_tokens=snake_case )
lowercase : Tuple = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=snake_case )
self.assertEqual(snake_case ,snake_case )
self.assertNotIn(self.tokenizer.eos_token ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[str] = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] ,snake_case )
lowercase : Tuple = 10
lowercase : List[str] = self.tokenizer(snake_case ,max_length=snake_case ,truncation=snake_case ).input_ids[0]
self.assertEqual(ids[-2] ,2 )
self.assertEqual(ids[-1] ,snake_case )
self.assertEqual(len(snake_case ) ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) ,[50004, 50001] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[str] = tempfile.mkdtemp()
lowercase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(snake_case )
lowercase : Optional[Any] = PLBartTokenizer.from_pretrained(snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,snake_case )
@require_torch
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=snake_case ,return_tensors="""pt""" )
lowercase : List[str] = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() ,[2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] ,snake_case )
self.assertEqual(batch.decoder_input_ids[1][-1] ,2 )
self.assertEqual(batch.labels[1][-2:].tolist() ,[2, EN_CODE] )
@require_torch
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Union[str, Any] = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=snake_case ,truncation=snake_case ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,)
lowercase : str = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case ,snake_case )
self.assertEqual((2, 26) ,batch.input_ids.shape )
self.assertEqual((2, 26) ,batch.attention_mask.shape )
lowercase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,snake_case )
self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, PYTHON_CODE] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.tokenizer(self.src_text ,padding=snake_case ,truncation=snake_case ,max_length=3 ,return_tensors="""pt""" )
lowercase : str = self.tokenizer(
text_target=self.tgt_text ,padding=snake_case ,truncation=snake_case ,max_length=10 ,return_tensors="""pt""" )
lowercase : int = targets["""input_ids"""]
lowercase : Tuple = shift_tokens_right(snake_case ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = self.tokenizer._build_translation_inputs(
"""A test""" ,return_tensors="""pt""" ,src_lang="""en_XX""" ,tgt_lang="""java""" )
self.assertEqual(
nested_simplify(snake_case ) ,{
# A, test, EOS, en_XX
"""input_ids""": [[150, 242, 2, 50003]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 50001,
} ,)
| 336
| 0
|
'''simple docstring'''
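# Project Euler problem 6: difference between the square of the sum and the
# sum of the squares of the first n natural numbers, via closed-form formulas.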
def solution( n = 1_00 ) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"{solution() = }")
| 714
|
from ..utils import is_flax_available, is_torch_available
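# Model classes are exposed conditionally: PyTorch implementations only when
# torch is installed, Flax implementations only when flax is installed.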
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 569
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Any =logging.get_logger(__name__)
lowerCAmelCase__ : Union[str, Any] ={
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class __lowercase (PretrainedConfig ):
    """simple docstring"""
    model_type = """swin2sr"""
    attribute_map = {
        """hidden_size""": """embed_dim""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
def __init__( self , lowerCAmelCase__=6_4 , lowerCAmelCase__=1 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8_0 , lowerCAmelCase__=[6, 6, 6, 6, 6, 6] , lowerCAmelCase__=[6, 6, 6, 6, 6, 6] , lowerCAmelCase__=8 , lowerCAmelCase__=2.0 , lowerCAmelCase__=True , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__="gelu" , lowerCAmelCase__=False , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__=2 , lowerCAmelCase__=1.0 , lowerCAmelCase__="1conv" , lowerCAmelCase__="pixelshuffle" , **lowerCAmelCase__ , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE_ : List[str] = patch_size
SCREAMING_SNAKE_CASE_ : List[Any] = num_channels
SCREAMING_SNAKE_CASE_ : Optional[int] = embed_dim
SCREAMING_SNAKE_CASE_ : Optional[int] = depths
SCREAMING_SNAKE_CASE_ : Any = len(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_heads
SCREAMING_SNAKE_CASE_ : Any = window_size
SCREAMING_SNAKE_CASE_ : str = mlp_ratio
SCREAMING_SNAKE_CASE_ : Any = qkv_bias
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = drop_path_rate
SCREAMING_SNAKE_CASE_ : str = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = use_absolute_embeddings
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = upscale
SCREAMING_SNAKE_CASE_ : int = img_range
SCREAMING_SNAKE_CASE_ : Optional[int] = resi_connection
SCREAMING_SNAKE_CASE_ : List[Any] = upsampler
| 101
|
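# Project Euler problem 29: count the distinct values of a**b
# for 2 <= a <= n and 2 <= b <= n.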
def solution( n = 100 ) -> int:
    collect_powers = set()
    current_pow = 0
    max_n = n + 1 # maximum limit
    for a in range(2 , max_n ):
        for b in range(2 , max_n ):
            current_pow = a**b # calculates the current power
            collect_powers.add(current_pow ) # adds the result to the set
    return len(collect_powers )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 33
| 0
|
from decimal import Decimal, getcontext
from math import ceil, factorial
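# Computes pi to the requested number of digits with the Chudnovsky series:
# each term of the sum contributes roughly 14 additional correct digits.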
def pi( precision ) -> str:
    '''simple docstring'''
    if not isinstance(precision , int ):
        raise TypeError("""Undefined for non-integers""" )
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""" )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 42_68_80 * Decimal(1_00_05 ).sqrt()
    exponential_term = 1
    linear_term = 13_59_14_09
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 5_45_14_01_34
        exponential_term *= -26_25_37_41_26_40_76_80_00
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 5_0
print(f'''The first {n} digits of pi is: {pi(n)}''')
| 495
|
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
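# Project Euler problem 8: greatest product of thirteen adjacent digits in the
# 1000-digit number above.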
def solution( n = N ) -> int:
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 495
| 1
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
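# Plain containers for raw examples and featurized examples, plus the dataset
# split enum used by the dataset classes below.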
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split( Enum ):
    train = 'train'
    dev = 'dev'
    test = 'test'
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file( data_dir , mode : Union[Split, str] ) -> List[InputExample]:
        """simple docstring"""
        raise NotImplementedError
    @staticmethod
    def get_labels( path : str ) -> List[str]:
        """simple docstring"""
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(
        examples : List[InputExample] ,
        label_list : List[str] ,
        max_seq_length : int ,
        tokenizer : PreTrainedTokenizer ,
        cls_token_at_end=False ,
        cls_token="[CLS]" ,
        cls_token_segment_id=1 ,
        sep_token="[SEP]" ,
        sep_token_extra=False ,
        pad_on_left=False ,
        pad_token=0 ,
        pad_token_segment_id=0 ,
        pad_token_label_id=-100 ,
        sequence_a_segment_id=0 ,
        mask_padding_with_zero=True ,
    ) -> List[InputFeatures]:
        """simple docstring"""
        label_map = {label: i for i, label in enumerate(label_list )}
        features = []
        for ex_index, example in enumerate(examples ):
            if ex_index % 10_000 == 0:
                logger.info('''Writing example %d of %d''' , ex_index , len(examples ) )
            tokens = []
            label_ids = []
            for word, label in zip(example.words , example.labels ):
                word_tokens = tokenizer.tokenize(word )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens ) > 0:
                    tokens.extend(word_tokens )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens ) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens )
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids )
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids )
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids ) == max_seq_length
            assert len(input_mask ) == max_seq_length
            assert len(segment_ids ) == max_seq_length
            assert len(label_ids ) == max_seq_length
            if ex_index < 5:
                logger.info('''*** Example ***''' )
                logger.info('''guid: %s''' , example.guid )
                logger.info('''tokens: %s''' , ''' '''.join([str(x ) for x in tokens] ) )
                logger.info('''input_ids: %s''' , ''' '''.join([str(x ) for x in input_ids] ) )
                logger.info('''input_mask: %s''' , ''' '''.join([str(x ) for x in input_mask] ) )
                logger.info('''segment_ids: %s''' , ''' '''.join([str(x ) for x in segment_ids] ) )
                logger.info('''label_ids: %s''' , ''' '''.join([str(x ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids , attention_mask=input_mask , token_type_ids=segment_ids , label_ids=label_ids ) )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class TokenClassificationDataset( Dataset ):
    features: List[InputFeatures]
    pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
    def __init__( self , token_classification_task : TokenClassificationTask , data_dir : str , tokenizer : PreTrainedTokenizer , labels : List[str] , model_type : str , max_seq_length : Optional[int] = None , overwrite_cache=False , mode : Split = Split.train , ):
        """simple docstring"""
        cached_features_file = os.path.join(
            data_dir , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(max_seq_length ) ) , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '''.lock'''
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not overwrite_cache:
                logger.info(F'Loading features from cached file {cached_features_file}' )
                self.features = torch.load(cached_features_file )
            else:
                logger.info(F'Creating features from dataset file at {data_dir}' )
                examples = token_classification_task.read_examples_from_file(data_dir , mode )
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info(F'Saving features into cached file {cached_features_file}' )
                torch.save(self.features , cached_features_file )
def __len__( self : Any ) -> int:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Dict , lowerCAmelCase_ : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
if is_tf_available():
import tensorflow as tf
class TFTokenClassificationDataset:
    features: List[InputFeatures]
    pad_token_label_id: int = -100
    def __init__( self , token_classification_task : TokenClassificationTask , data_dir : str , tokenizer : PreTrainedTokenizer , labels : List[str] , model_type : str , max_seq_length : Optional[int] = None , overwrite_cache=False , mode : Split = Split.train , ):
        """simple docstring"""
        examples = token_classification_task.read_examples_from_file(data_dir , mode )
        # TODO clean up all this to leverage built-in features of tokenizers
        self.features = token_classification_task.convert_examples_to_features(
            examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_a = tf.data.Dataset.from_generator(
lowerCAmelCase_ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
_a = tf.data.Dataset.from_generator(
lowerCAmelCase_ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
_a = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Tuple ) -> Tuple:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Union[str, Any] , lowerCAmelCase_ : Tuple ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
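# A minimal, self-contained sketch (an addition, not part of the original module)
# of the `from_generator` pattern used above: variable-length examples become a
# tf.data.Dataset via a Python generator plus dtype/shape specs. The token ids
# below are made up for illustration.
if __name__ == "__main__" and is_tf_available():
    def _toy_gen():
        yield {"input_ids": [101, 2009, 102], "attention_mask": [1, 1, 1]}, [0, 1, 0]

    toy_dataset = tf.data.Dataset.from_generator(
        _toy_gen,
        ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
        (
            {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
            tf.TensorShape([None]),
        ),
    )
    for inputs, labels in toy_dataset.take(1):
        print(inputs["input_ids"].numpy(), labels.numpy())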
| 22
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 253
| 0
|
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(node: TreeNode | None) -> bool:
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(node, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
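    # Quick usage sketch (an addition to the original module): a tree rooted at 2
    # with children 1 and 3 is a valid BST; swapping the children breaks it.
    assert is_binary_search_tree(TreeNode(2, TreeNode(1), TreeNode(3)))
    assert not is_binary_search_tree(TreeNode(2, TreeNode(3), TreeNode(1)))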
| 58
|
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        # Fundamental transformation: shift every channel value by `level`.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
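# A minimal extra sketch (an addition, not in the original script): apply the same
# transform to a tiny in-memory image so nothing on disk is required.
if __name__ == "__main__":
    gray = Image.new("L", (2, 2), color=100)
    brighter = change_brightness(gray, 50)
    print(list(brighter.getdata()))  # each pixel shifted up by 50 -> [150, 150, 150, 150]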
| 58
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 512,
'''google/electra-base-generator''': 512,
'''google/electra-large-generator''': 512,
'''google/electra-small-discriminator''': 512,
'''google/electra-base-discriminator''': 512,
'''google/electra-large-discriminator''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
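if __name__ == "__main__":
    # Pure-Python illustration (an addition, using made-up ids 101=[CLS], 102=[SEP]):
    # this mirrors what the two helpers above produce for a sentence pair.
    cls_id, sep_id = 101, 102
    a, b = [7, 8], [9]
    print([cls_id] + a + [sep_id] + b + [sep_id])   # [101, 7, 8, 102, 9, 102]
    print([0] * (len(a) + 2) + [1] * (len(b) + 1))  # [0, 0, 0, 0, 1, 1]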
| 588
|
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
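    # Sanity sketch (an addition, not in the original): all three
    # implementations agree on a handful of values.
    for n in (0, 9, 10, 123, -456):
        assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)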
| 588
| 1
|
'''simple docstring'''
from math import ceil
def assert_device_map(device_map: dict, num_blocks: int) -> None:
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers: int, devices: list) -> dict:
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
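if __name__ == "__main__":
    # Usage sketch (an addition): split 12 layers evenly across two hypothetical devices.
    device_map = get_device_map(12, [0, 1])
    print(device_map)  # {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
    assert_device_map(device_map, 12)  # raises ValueError on missing/duplicate blocks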
| 355
|
'''simple docstring'''
from collections import deque
def tarjan(g):
    """Tarjan's algorithm for finding strongly connected components in a directed graph."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
            elif on_stack[w]:
                lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
snake_case_ = 7
snake_case_ = [0, 0, 1, 2, 3, 3, 4, 4, 6]
snake_case_ = [1, 3, 2, 0, 1, 4, 5, 6, 5]
snake_case_ = [(u, v) for u, v in zip(source, target)]
snake_case_ = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
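    # Extra check (an addition): a two-node cycle forms one component and an
    # isolated vertex forms its own.
    assert tarjan(create_graph(3, [(0, 1), (1, 0)])) == [[1, 0], [2]]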
| 355
| 1
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
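if __name__ == "__main__":
    # Quick sketch (an addition): the property above multiplies the conv strides,
    # i.e. how many raw audio samples collapse into one logit frame.
    config = SEWDConfig()
    print(config.inputs_to_logits_ratio)  # 5 * 2**6 = 320 for the default strides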
| 310
|
'''simple docstring'''
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, config_file: str, pytorch_dump_path: str) -> None:
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
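# Example invocation (an addition; the script name and paths are illustrative
# placeholders, not real files):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf_checkpoint \
#       --config_file /path/to/t5_config.json \
#       --pytorch_dump_path /path/to/pytorch_model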
| 310
| 1
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self):
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
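# Behaviour sketch (an addition, with a made-up example): multiple translations
# for one language are split into parallel, language-sorted lists, e.g.
#   TranslationVariableLanguages(languages=["en", "fr"]).encode_example(
#       {"en": "the cat", "fr": ["le chat", "la chatte"]})
#   -> {"language": ("en", "fr", "fr"), "translation": ("the cat", "la chatte", "le chat")}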
| 702
|
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the impedance formula Z = sqrt(R**2 + X**2) to solve for the one
    quantity (resistance, reactance or impedance) that is passed as 0.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
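    # Usage sketch (an addition): a 3-4-5 right triangle of resistance,
    # reactance and impedance.
    assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}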
| 146
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
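if __name__ == "__main__":
    # Shape sketch (an addition): the "every second token is global" mask
    # produced by generate_dummy_inputs above, for a batch of one, length 8.
    import torch

    mask = torch.zeros(1, 8, dtype=torch.int64)
    mask[:, ::2] = 1
    print(mask)  # tensor([[1, 0, 1, 0, 1, 0, 1, 0]])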
| 31
| 0
|
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    # Heron's formula: area from the semi-perimeter and the three sides.
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as "
            "length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
    print(F'Regular Pentagon: {area_reg_polygon(5, 10) = }')
| 717
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
@skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        output = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 452
| 0
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Gets a set of train, valid, and test dataloaders for a particular fold."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
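# Mini illustration (an addition, not in the original script): what StratifiedKFold
# feeds the loop below -- per-fold index arrays whose class balance mirrors the
# full label set, e.g.
#   labels = np.array([0, 0, 0, 1, 1, 1])
#   for train_idx, valid_idx in StratifiedKFold(n_splits=3).split(np.zeros(6), labels):
#       ...  # each validation fold holds exactly one 0 and one 1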
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Union[str, Any] = get_fold_dataloaders(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase_: Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase_: Any = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase_: int = AdamW(params=model.parameters() , lr=lowerCamelCase )
# Instantiate scheduler
UpperCamelCase_: int = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(lowerCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))

        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"
        " and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument(
        "--num_folds", type=int, default=3, help="The number of splits to perform across the dataset"
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
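# Example launch (script filename assumed; adjust to wherever this file is saved):
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16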
if __name__ == "__main__":
main()
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def b2mb(x) -> int:
    return int(x / 2**20)
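# b2mb converts a byte count to megabytes. TorchTracemalloc below is a context
# manager that records the GPU memory delta (`used`) and peak (`peaked`), in MB,
# for the code executed inside its `with` block.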
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
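# Note on the optimizer/scheduler selection in `training_function` below: when the
# DeepSpeed config file already defines an optimizer or scheduler, `accelerate`
# expects the DummyOptim/DummyScheduler placeholders; the real objects are then
# built by DeepSpeed from the config inside `accelerator.prepare`.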
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
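# Example launch with a DeepSpeed plugin configured via `accelerate config`
# (script filename assumed):
#   accelerate launch peak_memory_tracking.py --num_epochs 1 --output_dir .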
'''Shortest Remaining Time First (preemptive SJF) process scheduling.'''
from __future__ import annotations
import pandas as pd
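# SRTF: at every time unit, run the arrived process with the least remaining
# burst time; a newly arrived shorter job preempts the currently running one.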
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process using preemptive SJF scheduling."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turn around time of each process: burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting time and average turn around time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print('Enter how many process you want to analyze')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('Enter the arrival time and burst time for process:--' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
'''Cosmology: compute the Hubble parameter H(z) from the first Friedmann equation.'''


def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter at the given redshift."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble
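# Implemented relation:
#   H(z) = H0 * sqrt(Omega_r*(1+z)**4 + Omega_m*(1+z)**3 + Omega_k*(1+z)**2 + Omega_Lambda)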
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
snake_case_ = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, 'rb') as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith('version'):
                    raise OSError(
                        'You seem to have cloned a repository without having git-lfs installed. Please'
                        ' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
                        ' folder you cloned.'
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.'
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
            'before loading those in PyTorch model.'
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ''

    flax_state_dict = flatten_dict(flax_state, sep='.')
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split('.')

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace('_0', '.0')
                    .replace('_1', '.1')
                    .replace('_2', '.2')
                    .replace('_3', '.3')
                    .replace('_4', '.4')
                    .replace('_5', '.5')
                    .replace('_6', '.6')
                    .replace('_7', '.7')
                    .replace('_8', '.8')
                    .replace('_9', '.9')
                )

        flax_key = '.'.join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
if len(__UpperCAmelCase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
if len(__UpperCAmelCase ) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
' use it for predictions and inference.' )
return pt_model
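# Typical usage (assuming a Flax state dict serialized with msgpack and a
# matching PyTorch module):
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")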
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_and_python_full_tokenizers(self):
        pass

    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
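# `_import_structure` maps each submodule to its public names; `_LazyModule` defers
# the actual imports until an attribute is first accessed.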
_import_structure = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def is_automorphic_number(number: int) -> bool:
    '''Return True if the square of `number` ends in `number` itself.'''
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
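# Example: 76 is automorphic because 76**2 == 5776, which ends in 76.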
if __name__ == "__main__":
import doctest
doctest.testmod()
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]
    return out_tensor.tolist()
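# For example, with right padding: padding_tensor([[1, 2]], -1, "right", 4) -> [[1, 2, -1, -1]].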
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
def _A (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase= [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__lowercase= dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
__lowercase= ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__lowercase= {'unk_token': '<unk>'}
__lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase ) )
def _A (self , **lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def _A (self , **lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def _A (self , lowerCAmelCase ):
__lowercase= 'lower newer'
__lowercase= 'lower newer'
return input_text, output_text
def _A (self ):
__lowercase= self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase= 'lower newer'
__lowercase= ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
__lowercase= tokenizer.tokenize(lowerCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowercase= tokens + [tokenizer.unk_token]
__lowercase= [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )
def _A (self ):
__lowercase= self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def _A (self ):
__lowercase= self.tokenizer_class.from_pretrained('roberta-base' )
__lowercase= tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase )
__lowercase= tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase )
__lowercase= tokenizer.encode(
'sequence builders' , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
__lowercase= tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
__lowercase= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
__lowercase= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _A (self ):
__lowercase= self.get_tokenizer()
__lowercase= 'Encode this sequence.'
__lowercase= tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
__lowercase= tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
__lowercase= tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCAmelCase , lowerCAmelCase )
__lowercase= tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
__lowercase= tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
__lowercase= tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
__lowercase= tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCAmelCase , lowerCAmelCase )
# Testing spaces after special tokens
__lowercase= '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase )} ) # mask token has a left space
__lowercase= tokenizer.convert_tokens_to_ids(lowerCAmelCase )
__lowercase= 'Encode <mask> sequence'
__lowercase= 'Encode <mask>sequence'
__lowercase= tokenizer.encode(lowerCAmelCase )
__lowercase= encoded.index(lowerCAmelCase )
__lowercase= tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
__lowercase= tokenizer.encode(lowerCAmelCase )
__lowercase= encoded.index(lowerCAmelCase )
__lowercase= tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCAmelCase , lowerCAmelCase )
def _A (self ):
pass
def _A (self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowercase= self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
__lowercase= self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
__lowercase= 'A, <mask> AllenNLP sentence.'
__lowercase= tokenizer_r.encode_plus(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_token_type_ids=lowerCAmelCase )
__lowercase= tokenizer_p.encode_plus(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_token_type_ids=lowerCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
__lowercase= tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
__lowercase= tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def _A (self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase= self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
__lowercase= json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase= json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , lowerCAmelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , lowerCAmelCase )
self.assertEqual(post_processor_state['trim_offsets'] , lowerCAmelCase )
def _A (self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowercase= 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase= f'{text_of_1_token} {text_of_1_token}'
__lowercase= self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
__lowercase= tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase ) + 1, len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
__lowercase= self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
__lowercase= tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase ) + 1, len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
__lowercase= self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
__lowercase= tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase ), len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
__lowercase= self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
__lowercase= tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase ), len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
__lowercase= f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase= self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
__lowercase= tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase ) + 1, 1 + len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
__lowercase= self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
__lowercase= tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase ), 1 + len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
__lowercase= self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase )
__lowercase= tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase ), 1 + len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
'''Check the status of self-hosted GitHub Actions runners and report offline ones.'''
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    '''Query the GitHub API and collect the target runners that are offline.'''
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode('utf-8')
    status = json.loads(o)

    runners = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open('offline_runners.txt', 'w') as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners])
        raise ValueError(f'The following runners are offline:\n{failed}')
if __name__ == "__main__":
    def list_str(values):
        '''Parse a comma-separated string into a list.'''
        return values.split(',')
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
lowercase_ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
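# Example (script name assumed):
#   python get_github_runner_status.py --target_runners runner-1,runner-2 --token <GITHUB_TOKEN>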
'''Morse code encoding and decoding.'''
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    '''Encode a message into Morse code.'''
    return ' '.join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    '''Decode a Morse-code message back into plain text.'''
    return ''.join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = 'Morse code here!'
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
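# Example: encrypt('SOS') -> '... --- ...'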
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
lowerCamelCase__ : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class __magic_name__ (datasets.BuilderConfig ):
'''simple docstring'''
__lowercase : Optional[datasets.Features] = None
__lowercase : str = "utf-8"
__lowercase : Optional[str] = None
__lowercase : Optional[str] = None
__lowercase : bool = True # deprecated
__lowercase : Optional[int] = None # deprecated
__lowercase : int = 10 << 20 # 10MB
__lowercase : Optional[bool] = None
class __magic_name__ (datasets.ArrowBasedBuilder ):
'''simple docstring'''
__lowercase : int = JsonConfig
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
snake_case__ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:int ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
snake_case__ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a , (str, list, tuple) ):
snake_case__ = data_files
if isinstance(_a , _a ):
snake_case__ = [files]
snake_case__ = [dl_manager.iter_files(_a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
snake_case__ = []
for split_name, files in data_files.items():
if isinstance(_a , _a ):
snake_case__ = [files]
snake_case__ = [dl_manager.iter_files(_a ) for file in files]
splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={'''files''': files} ) )
return splits
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:pa.Table ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
snake_case__ = self.config.features.arrow_schema.field(_a ).type
snake_case__ = pa_table.append_column(_a , pa.array([None] * len(_a ) , type=_a ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
snake_case__ = table_cast(_a , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:List[Any] ):
for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_a , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case__ = json.load(_a )
# We keep only the field we are interested in
snake_case__ = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(_a , (list, tuple) ):
snake_case__ = set().union(*[row.keys() for row in dataset] )
snake_case__ = {col: [row.get(_a ) for row in dataset] for col in keys}
else:
snake_case__ = dataset
snake_case__ = pa.Table.from_pydict(_a )
yield file_idx, self._cast_table(_a )
# If the file has one json object per line
else:
with open(_a , '''rb''' ) as f:
snake_case__ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
snake_case__ = max(self.config.chunksize // 32 , 16 << 10 )
snake_case__ = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
snake_case__ = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_a )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
snake_case__ = batch.decode(self.config.encoding , errors=_a ).encode('''utf-8''' )
try:
while True:
try:
snake_case__ = paj.read_json(
io.BytesIO(_a ) , read_options=paj.ReadOptions(block_size=_a ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_a , pa.ArrowInvalid )
and "straddling" not in str(_a )
or block_size > len(_a )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(_a )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_a , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case__ = json.load(_a )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(_a )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_a , _a ): # list is the only sequence type supported in JSON
try:
snake_case__ = set().union(*[row.keys() for row in dataset] )
snake_case__ = {col: [row.get(_a ) for row in dataset] for col in keys}
snake_case__ = pa.Table.from_pydict(_a )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(_a )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(_a )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(_a )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_a )
batch_idx += 1
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : int = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
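# timm stores Q, K, V as one fused qkv matrix; the slices above split it into the
# separate query/key/value projections that the HF ViT implementation expects.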
def remove_classification_head_(state_dict):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('''tiny'''):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('''small'''):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('''small'''):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('''base'''):
            pass
        elif vit_name[4:].startswith('''large'''):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('''huge'''):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='''pt''')
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
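
# A minimal usage sketch (assuming this file is saved as convert_vit_timm_to_pytorch.py;
# the output folder below is illustrative). The model name is the script's own default:
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224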
| 33
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak a DINO-pretrained ViT checkpoint into our ViT structure.
    """

    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    parser.set_defaults(base_model=True)

    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
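
# A minimal usage sketch (assuming this file is saved as convert_dino_to_pytorch.py;
# the output folder is illustrative). Since parser.set_defaults(base_model=True) is
# applied above, only the ViT backbone is converted by default:
#
#   python convert_dino_to_pytorch.py \
#       --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino-vitb16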
| 700
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
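
# For reference, the same inference flow as a standalone sketch outside the test
# harness (assumes network access to the Hugging Face Hub and the `datasets` library):
#
#   from datasets import load_dataset
#   from transformers import AutoImageProcessor, AutoModelForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
#   model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
#   image = load_dataset("nielsr/rvlcdip-demo")["train"][0]["image"].convert("RGB")
#   logits = model(**processor(image, return_tensors="pt")).logits
#   print(model.config.id2label[logits.argmax(-1).item()])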
| 353
| 0
|
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask


logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 dataset, the chunk column is the second-to-last one
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
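
# An illustration of the whitespace-separated CoNLL-2003-style input that
# NER.read_examples_from_file expects: one token per line with its label in the
# configured column (last column by default), blank lines separating sentences:
#
#   EU NNP B-NP B-ORG
#   rejects VBZ B-VP O
#   German JJ B-NP B-MISC
#
#   Peter NNP B-NP B-PER
#   Blackburn NNP I-NP I-PER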
| 48
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__SCREAMING_SNAKE_CASE : List[Any] = True
except (ImportError, AttributeError):
__SCREAMING_SNAKE_CASE : List[Any] = object
def lowerCAmelCase_( *lowercase_ : Dict , **lowercase_ : str ) -> str:
pass
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Any = logging.get_logger('''transformers-cli/serving''')
def lowerCAmelCase_( lowercase_ : Namespace ) -> List[Any]:
_lowerCamelCase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(lowercase_ , args.host , args.port , args.workers )
class ServeModelInfoResult(BaseModel):
    """
    Expose model information
    """

    infos: dict


class ServeTokenizeResult(BaseModel):
    """
    Tokenize result model
    """

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """
    DeTokenize result model
    """

    text: str


class ServeForwardResult(BaseModel):
    """
    Forward result model
    """

    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers CLI.
        """
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input and eventually return the corresponding token ids.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        Detokenize the provided token ids to readable text.
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
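
# A client-side sketch of calling the /tokenize endpoint once the server is up
# (e.g. via `transformers-cli serve --task sentiment-analysis`); host and port are
# the defaults registered above, and the `requests` usage is illustrative:
#
#   import requests
#
#   response = requests.post(
#       "http://localhost:8888/tokenize",
#       json={"text_input": "Hello world", "return_ids": True},
#   )
#   print(response.json())  # {"tokens": [...], "tokens_ids": [...]}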
| 661
| 0
|
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """
    Gaussian Error Linear Unit. Original implementation of the gelu activation function from the Google BERT repo.
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf


def _gelu_new(x):
    """
    Smoother version of gelu, using the tanh approximation.
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)

    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)

    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """
    Clip the range of possible GeLU outputs to [-10, 10]. Useful for quantization purposes.
    """
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """
    Gated Linear Unit: splits the input into two halves along `axis` and gates the first half with the sigmoid of
    the second half.
    """
    a, b = tf.split(x, 2, axis=axis)

    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
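
# Example usage (illustrative): look up an activation by its string name and
# apply it to a small tensor.
#
#   act_fn = get_tf_activation("gelu_new")
#   print(act_fn(tf.constant([-1.0, 0.0, 1.0])).numpy())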
| 479
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
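
# Outside the test harness, the processor is typically loaded from a released
# checkpoint (the checkpoint name below is an assumption for illustration):
#
#   from transformers import MobileViTImageProcessor
#
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values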
| 479
| 1
|
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
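
# A sketch of wiring this trainer into a seq2seq question-answering script; the
# model, dataset and post-processing function names are assumptions for illustration:
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=30, num_beams=4)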
| 24
|
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """
    Return number + 2 if both number and number + 2 are prime (i.e. they form a
    twin prime pair), and -1 otherwise.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
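
    # Quick illustrative checks: 5 and 7 are twin primes, while 4 is not prime.
    assert twin_prime(5) == 7
    assert twin_prime(4) == -1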
| 611
| 0
|
import math


def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum of the first n natural
    numbers and the sum of their squares.
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
| 586
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper that lets the vissl helper iterate over the RegNet trunk's feature blocks.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic to return a function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val


class NameToOurModelFuncMap(dict):
    """
    A dictionary with some additional logic to return the correct Hugging Face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")

    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

        if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
            to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
            our_model.load_state_dict(to_state_dict)

        our_outputs = our_model(x, output_hidden_states=True)
        our_output = (
            our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
        )
        from_output = from_model(x)
        from_output = from_output[-1] if type(from_output) is list else from_output

        # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's
        # just check the last hidden state
        if "seer" in name and "in1k" in name:
            our_output = our_outputs.hidden_states[-1]

        assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

    print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
    }

    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )

    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )

    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )

    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )

    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )

    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )

    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
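
# A usage sketch (assuming this file is saved as convert_regnet_to_pytorch.py;
# the folder name is illustrative). Converting a single architecture:
#
#   python convert_regnet_to_pytorch.py \
#       --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./regnet-y-040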
| 586
| 1
|