| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    """Create an empty (all dead) square canvas of the given size."""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    """Randomly set each cell of the canvas to alive or dead."""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Apply the rules of the game to every point and return the next generation."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """Decide the next state of a single cell from its 3x3 neighbourhood."""
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
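# --- Usage sketch (added for illustration; not part of the original script). ---
# Steps the simulation a few generations without matplotlib so the update
# logic can be sanity-checked headlessly.
def _headless_demo(generations: int = 5, size: int = 10) -> None:
    board = create_canvas(size)
    seed(board)
    for _ in range(generations):
        board = run(board)
    print(f"alive cells after {generations} generations:", sum(map(sum, board)))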
| 662 |
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix equals its own conjugate transpose (is Hermitian)."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
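# --- Illustrative check (added; not part of the original module): for a
# Hermitian matrix, the Rayleigh quotient of any nonzero vector is bounded by
# the smallest and largest eigenvalues.
def _eigenvalue_bound_demo() -> None:
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    v = np.array([[1.0], [-2.0], [0.5]])
    r = rayleigh_quotient(a, v).item()
    eigs = np.linalg.eigvalsh(a)
    assert eigs.min() <= r <= eigs.max()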
| 662 | 1 |
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance in meters between two points on Earth,
    given their latitudes and longitudes in degrees.
    """
    # Constants per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
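# --- Example call (added for illustration): San Francisco (37.7749, -122.4194)
# to New York City (40.7128, -74.0060); the great-circle distance should come
# out at roughly 4.1e6 meters.
def _distance_demo() -> None:
    print(haversine_distance(37.7749, -122.4194, 40.7128, -74.0060))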
| 662 |
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into lower and upper triangular factors."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
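# --- Illustrative check (added; not part of the original module): the two
# triangular factors should multiply back to the input matrix.
def _round_trip_demo() -> None:
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)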
| 662 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
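# --- Usage sketch (added for illustration; assumes this module ships inside
# transformers' agents/tools package, so the class is importable from there):
#
#     from transformers.tools import TextToSpeechTool
#
#     tool = TextToSpeechTool()
#     waveform = tool("Hello, my dog is cute")  # returns an audio tensor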
| 662 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)


class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert raw model outputs into per-pixel semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
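# --- Usage sketch (added for illustration; assumes the processor is exposed as
# `transformers.DPTImageProcessor` and an image file exists at the given path):
#
#     from PIL import Image
#     from transformers import DPTImageProcessor
#
#     processor = DPTImageProcessor(keep_aspect_ratio=True, ensure_multiple_of=32)
#     inputs = processor(images=Image.open("photo.jpg"), return_tensors="pt")
#     # inputs["pixel_values"] is ready to feed to a DPT model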
| 662 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
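# --- Illustration (added; not part of the original module): the SQuAD-style
# metrics above compare normalized text, so casing, punctuation and articles
# do not affect the score.
def _metrics_demo() -> None:
    assert normalize_answer("The  Cat!") == "cat"
    assert exact_match_score("the cat", "Cat!")
    assert abs(f1_score("new york city", "york city") - 0.8) < 1e-9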
| 662 |
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
| 662 | 1 |
import numpy as np
import qiskit


def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the generated key."""
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
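# --- Illustration (added; numpy only, no qiskit needed): basis sifting. On
# average half of Alice's and Bob's independently chosen bases agree, which is
# why the circuit above prepares 6 * key_len qubits to comfortably exceed
# key_len sifted bits.
def _sift_rate_demo(seed: int = 0, n: int = 10_000) -> None:
    rng = np.random.default_rng(seed)
    alice = rng.integers(2, size=n)
    bob = rng.integers(2, size=n)
    print("fraction of matching bases:", (alice == bob).mean())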
| 662 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
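# --- Usage sketch (added for illustration): instantiate the default BiT-R50
# layout and pick which stages a backbone should expose.
def _config_demo() -> None:
    config = BitConfig(out_features=["stage1", "stage4"])
    print(config.layer_type)   # "preactivation"
    print(config.stage_names)  # ["stem", "stage1", "stage2", "stage3", "stage4"]
    print(config.out_features)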
| 662 | 1 |
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
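# --- Usage sketch (added for illustration; checkpoint and config paths are
# placeholders):
#
#     python <this_script>.py \
#         --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#         --mobilebert_config_file ./mobilebert/config.json \
#         --pytorch_dump_path ./pytorch_model.bin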
| 662 |
import unittest

from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import BertGenerationDecoder, BertGenerationEncoder


class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 662 | 1 |
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.
    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 662 |
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
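# --- Usage sketch (added for illustration; assumes the processor is exposed as
# `transformers.ConvNextImageProcessor` and an image file exists at the path):
#
#     from PIL import Image
#     from transformers import ConvNextImageProcessor
#
#     processor = ConvNextImageProcessor(size={"shortest_edge": 224})
#     inputs = processor(images=Image.open("photo.jpg"), return_tensors="pt")
#     # sizes below 384 are resized to shortest_edge/crop_pct, then center-cropped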
| 662 | 1 |
import json
import os
from typing import Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}


def get_pairs(word):
    """
    Return set of symbol pairs in a word. Word is represented as a tuple of
    symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)
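# --- Illustration (added; not part of the original module): `get_pairs`
# enumerates adjacent symbol pairs, which is what the BPE loop above ranks and
# merges greedily.
def _get_pairs_demo() -> None:
    print(get_pairs(("h", "e", "l", "l", "o</w>")))
    # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}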
| 662 |
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")

        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original DiT weights into our BEiT structure.
    """

    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
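# Illustrative CLI invocation (the script filename is an assumption; the URL is this script's own default):
# python convert_dit_to_pytorch.py \
# --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
# --pytorch_dump_folder_path ./dit-base \
# --push_to_hub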
| 662 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
SCREAMING_SNAKE_CASE_:Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Optional[Any] = {"""vocab_file""": """spiece.model"""}
SCREAMING_SNAKE_CASE_:List[str] = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
SCREAMING_SNAKE_CASE_:Union[str, Any] = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
SCREAMING_SNAKE_CASE_:List[Any] = """▁"""
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : int = VOCAB_FILES_NAMES
__lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : int = ["input_ids", "attention_mask"]
def __init__( self, lowerCamelCase__, lowerCamelCase__="</s>", lowerCamelCase__="<unk>", lowerCamelCase__="<pad>", lowerCamelCase__=100, lowerCamelCase__=None, lowerCamelCase__ = None, lowerCamelCase__=True, **lowerCamelCase__, ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
A : str = [f'''<extra_id_{i}>''' for i in range(lowerCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
A : str = len(set(filter(lambda lowerCamelCase__ : bool("""extra_id""" in str(lowerCamelCase__ ) ), lowerCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
""" read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
A : str = legacy
A : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCamelCase__, unk_token=lowerCamelCase__, pad_token=lowerCamelCase__, extra_ids=lowerCamelCase__, additional_special_tokens=lowerCamelCase__, sp_model_kwargs=self.sp_model_kwargs, legacy=lowerCamelCase__, **lowerCamelCase__, )
A : List[str] = vocab_file
A : Dict = extra_ids
A : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase__ )
@staticmethod
def _lowerCAmelCase ( lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
A : Any = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""", lowerCamelCase__, )
return max_model_length
@property
def _lowerCAmelCase ( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def _lowerCAmelCase ( self ):
A : Union[str, Any] = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__, token_ids_a=lowerCamelCase__, already_has_special_tokens=lowerCamelCase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowerCamelCase__ )) + [1]
return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
def _lowerCAmelCase ( self ):
return list(
set(filter(lambda lowerCamelCase__ : bool(re.search(R"""<extra_id_\d+>""", lowerCamelCase__ ) ) is not None, self.additional_special_tokens ) ) )
def _lowerCAmelCase ( self ):
return [self._convert_token_to_id(lowerCamelCase__ ) for token in self.get_sentinel_tokens()]
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if len(lowerCamelCase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : List[Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Dict = self._add_eos_if_not_present(lowerCamelCase__ )
if token_ids_a is None:
return token_ids_a
else:
A : Tuple = self._add_eos_if_not_present(lowerCamelCase__ )
return token_ids_a + token_ids_a
def __getstate__( self ):
A : Union[str, Any] = self.__dict__.copy()
A : List[Any] = None
return state
def __setstate__( self, lowerCamelCase__ ):
A : Optional[Any] = d
# for backward compatibility
if not hasattr(self, """sp_model_kwargs""" ):
A : Optional[Any] = {}
A : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self, lowerCamelCase__, **lowerCamelCase__ ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
A : Any = SPIECE_UNDERLINE + text.replace(lowerCamelCase__, """ """ )
return super().tokenize(lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, **lowerCamelCase__ ):
if not self.legacy:
A : Union[str, Any] = text.startswith(lowerCamelCase__ )
if is_first:
A : int = text[1:]
A : Optional[int] = self.sp_model.encode(lowerCamelCase__, out_type=lowerCamelCase__ )
if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(lowerCamelCase__ ):
A : Optional[int] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def _lowerCAmelCase ( self, lowerCamelCase__ ):
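# Sentinel tokens occupy the top of the vocabulary in reverse order: <extra_id_0> maps to vocab_size - 1.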
if token.startswith("""<extra_id_""" ):
A : Any = re.match(R"""<extra_id_(\d+)>""", lowerCamelCase__ )
A : Optional[Any] = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if index < self.sp_model.get_piece_size():
A : Tuple = self.sp_model.IdToPiece(lowerCamelCase__ )
else:
A : Any = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Any = []
A : List[Any] = """"""
A : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase__ ) + token
A : str = True
A : Union[str, Any] = []
else:
current_sub_tokens.append(lowerCamelCase__ )
A : List[str] = False
out_string += self.sp_model.decode(lowerCamelCase__ )
return out_string.strip()
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A : List[str] = os.path.join(
lowerCamelCase__, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__, """wb""" ) as fi:
A : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
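# Hedged usage sketch (model name is illustrative; assumes the class above corresponds to transformers' T5Tokenizer):
# tok = T5Tokenizer.from_pretrained("t5-small")
# assert tok.convert_tokens_to_ids("<extra_id_0>") == tok.vocab_size - 1 # sentinel ids count down from the top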
| 662 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""", lowerCamelCase__, )
super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
| 662 | 1 |
def __UpperCamelCase ( _lowerCAmelCase = 10**12 ) -> int:
"""simple docstring"""
A : Tuple = 1
A : Dict = 0
A : int = 1
A : Optional[int] = 1
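# Pell-style recurrence: each pass maps (prev, current) -> (prev + 2*current, 2*prev + 5*current); the update
# matrix [[1, 2], [2, 5]] has determinant 1 and dominant eigenvalue 3 + 2*sqrt(2), so each step appears to
# jump to the next solution pair of a Pell-type equation.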
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(F"""{solution() = }""")
| 662 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(
lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, )
A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths}
A : str = Text(
cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, )
def _lowerCAmelCase ( self ):
# Build iterable dataset
if self.streaming:
A : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A : List[str] = None
A : Dict = None
A : Tuple = None
A : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, )
A : List[str] = self.builder.as_dataset(
split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory )
return dataset
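# Hedged usage sketch (file path is illustrative; assumes the class above corresponds to datasets' TextDatasetReader):
# ds = TextDatasetReader("corpus.txt", split=NamedSplit("train")).read()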
| 662 | 1 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
warnings.warn(
"""The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use LayoutLMv2ImageProcessor instead.""", lowerCamelCase__, )
super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
| 662 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 662 | 1 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
SCREAMING_SNAKE_CASE_:Dict = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
A : int = k.replace(_lowerCAmelCase , _lowerCAmelCase )
return k
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> PegasusForConditionalGeneration:
"""simple docstring"""
A : Optional[int] = DEFAULTS.copy()
cfg_kwargs.update(_lowerCAmelCase )
A : Any = PegasusConfig(**_lowerCAmelCase )
A : Dict = PegasusForConditionalGeneration(_lowerCAmelCase )
A : int = torch_model.model.state_dict()
A : int = {}
for k, v in tf_weights.items():
A : Optional[int] = rename_state_dict_key(_lowerCAmelCase )
if new_k not in sd:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
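# TensorFlow stores dense kernels as (in_features, out_features); PyTorch nn.Linear expects the transpose.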
A : List[Any] = v.T
A : Tuple = torch.tensor(_lowerCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
A : str = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
A : List[str] = mapping["""shared.weight"""]
A : int = mapping["""shared.weight"""]
A : Optional[int] = {k: torch.zeros_like(_lowerCAmelCase ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**_lowerCAmelCase )
A , A : List[str] = torch_model.model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
A : int = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def __UpperCamelCase ( _lowerCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
"""simple docstring"""
A : Any = tf.train.list_variables(_lowerCAmelCase )
A : Any = {}
A : Union[str, Any] = ["""Adafactor""", """global_step"""]
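# Adafactor optimizer slots and the global step counter are training state, not model weights, so skip them.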
for name, shape in tqdm(_lowerCAmelCase , desc="""converting tf checkpoint to dict""" ):
A : List[Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
A : Dict = tf.train.load_variable(_lowerCAmelCase , _lowerCAmelCase )
A : Dict = array
return tf_weights
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
A : List[str] = Path(_lowerCAmelCase ).parent.name
A : Tuple = task_specific_params[f'''summarization_{dataset}''']["""max_position_embeddings"""]
A : Tuple = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=_lowerCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(_lowerCAmelCase )
# convert model
A : int = get_tf_weights_as_numpy(_lowerCAmelCase )
A : Dict = task_specific_params[f'''summarization_{dataset}''']
if dataset == "large":
A : Optional[int] = task_specific_params
A : Any = convert_pegasus(_lowerCAmelCase , _lowerCAmelCase )
torch_model.save_pretrained(_lowerCAmelCase )
A : str = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(_lowerCAmelCase , Path(_lowerCAmelCase ) / """pytorch_model.bin""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE_:Tuple = parser.parse_args()
if args.save_dir is None:
SCREAMING_SNAKE_CASE_:List[Any] = Path(args.tf_ckpt_path).parent.name
SCREAMING_SNAKE_CASE_:str = os.path.join("""pegasus""", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 662 |
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int:
"""simple docstring"""
A , A : str = 1, 1
A : List[Any] = []
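# Successive convergents of the continued fraction of sqrt(2): n/d -> (n + 2d)/(n + d), starting from 1/1.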
for i in range(1 , n + 1 ):
A : Optional[int] = prev_numerator + 2 * prev_denominator
A : Any = prev_numerator + prev_denominator
if len(str(_lowerCAmelCase ) ) > len(str(_lowerCAmelCase ) ):
result.append(_lowerCAmelCase )
A : int = numerator
A : int = denominator
return len(_lowerCAmelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 662 | 1 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __UpperCamelCase ( _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Optional[Any] = {}
A : Tuple = job["""started_at"""]
A : str = job["""completed_at"""]
A : List[Any] = date_parser.parse(_lowerCAmelCase )
A : Optional[int] = date_parser.parse(_lowerCAmelCase )
A : Union[str, Any] = round((end_datetime - start_datetime).total_seconds() / 60.0 )
A : int = start
A : Tuple = end
A : Optional[int] = duration_in_min
return job_info
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]:
"""simple docstring"""
A : int = None
if token is not None:
A : Union[str, Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
A : List[str] = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
A : List[str] = requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).json()
A : Tuple = {}
try:
job_time.update({job["""name"""]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["""jobs"""]} )
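# The first request returns at most 100 jobs (per_page=100); fetch any remaining pages.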
A : Tuple = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(_lowerCAmelCase ):
A : List[str] = requests.get(url + f'''&page={i + 2}''' , headers=_lowerCAmelCase ).json()
job_time.update({job["""name"""]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["""jobs"""]} )
return job_time
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE_:Any = get_job_time(args.workflow_run_id)
SCREAMING_SNAKE_CASE_:Union[str, Any] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"""{k}: {v["duration"]}""")
| 662 |
import re
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
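# Example: the complement of "ATCG" is "TAGC" (a character-wise mapping; the strand is not reversed).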
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_:List[Any] = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:List[str] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 662 |
from __future__ import annotations
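# Sentinel character stored in a node to mark the end of a complete word.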
SCREAMING_SNAKE_CASE_:Tuple = """#"""
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self ):
A : dict = {}
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = self._trie
for char in text:
if char not in trie:
A : str = {}
A : str = trie[char]
A : Optional[int] = True
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Dict = self._trie
for char in prefix:
if char in trie:
A : Optional[Any] = trie[char]
else:
return []
return self._elements(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : int = []
for c, v in d.items():
A : List[Any] = [""" """] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )]
result.extend(lowerCamelCase__ )
return tuple(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_:Any = Trie()
SCREAMING_SNAKE_CASE_:Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def __UpperCamelCase ( _lowerCAmelCase ) -> tuple:
"""simple docstring"""
A : List[str] = trie.find_word(_lowerCAmelCase )
return tuple(string + word for word in suffixes )
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 662 | 1 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : torch.FloatTensor
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self, lowerCamelCase__=3, lowerCamelCase__=3, lowerCamelCase__=("DownEncoderBlock2D",), lowerCamelCase__=(64,), lowerCamelCase__=2, lowerCamelCase__=32, lowerCamelCase__="silu", lowerCamelCase__=True, ):
super().__init__()
A : Optional[Any] = layers_per_block
A : int = torch.nn.Convad(
lowerCamelCase__, block_out_channels[0], kernel_size=3, stride=1, padding=1, )
A : str = None
A : Dict = nn.ModuleList([] )
# down
A : List[Any] = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase__ ):
A : int = output_channel
A : List[str] = block_out_channels[i]
A : List[Any] = i == len(lowerCamelCase__ ) - 1
A : Any = get_down_block(
lowerCamelCase__, num_layers=self.layers_per_block, in_channels=lowerCamelCase__, out_channels=lowerCamelCase__, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=lowerCamelCase__, resnet_groups=lowerCamelCase__, attention_head_dim=lowerCamelCase__, temb_channels=lowerCamelCase__, )
self.down_blocks.append(lowerCamelCase__ )
# mid
A : Optional[int] = UNetMidBlockaD(
in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=lowerCamelCase__, output_scale_factor=1, resnet_time_scale_shift="""default""", attention_head_dim=block_out_channels[-1], resnet_groups=lowerCamelCase__, temb_channels=lowerCamelCase__, )
# out
A : int = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=lowerCamelCase__, eps=1e-6 )
A : Union[str, Any] = nn.SiLU()
A : Dict = 2 * out_channels if double_z else out_channels
A : Any = nn.Convad(block_out_channels[-1], lowerCamelCase__, 3, padding=1 )
A : Tuple = False
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : int = x
A : Optional[Any] = self.conv_in(lowerCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase__ ):
def custom_forward(*lowerCamelCase__ ):
return module(*lowerCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""", """1.11.0""" ):
for down_block in self.down_blocks:
A : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase__ ), lowerCamelCase__, use_reentrant=lowerCamelCase__ )
# middle
A : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ), lowerCamelCase__, use_reentrant=lowerCamelCase__ )
else:
for down_block in self.down_blocks:
A : Optional[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase__ ), lowerCamelCase__ )
# middle
A : List[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ), lowerCamelCase__ )
else:
# down
for down_block in self.down_blocks:
A : Union[str, Any] = down_block(lowerCamelCase__ )
# middle
A : Dict = self.mid_block(lowerCamelCase__ )
# post-process
A : Any = self.conv_norm_out(lowerCamelCase__ )
A : Dict = self.conv_act(lowerCamelCase__ )
A : Optional[int] = self.conv_out(lowerCamelCase__ )
return sample
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self, lowerCamelCase__=3, lowerCamelCase__=3, lowerCamelCase__=("UpDecoderBlock2D",), lowerCamelCase__=(64,), lowerCamelCase__=2, lowerCamelCase__=32, lowerCamelCase__="silu", lowerCamelCase__="group", ):
super().__init__()
A : Any = layers_per_block
A : str = nn.Convad(
lowerCamelCase__, block_out_channels[-1], kernel_size=3, stride=1, padding=1, )
A : int = None
A : Union[str, Any] = nn.ModuleList([] )
A : Optional[Any] = in_channels if norm_type == """spatial""" else None
# mid
A : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=lowerCamelCase__, output_scale_factor=1, resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=lowerCamelCase__, temb_channels=lowerCamelCase__, )
# up
A : int = list(reversed(lowerCamelCase__ ) )
A : str = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase__ ):
A : Optional[int] = output_channel
A : int = reversed_block_out_channels[i]
A : Any = i == len(lowerCamelCase__ ) - 1
A : Optional[int] = get_up_block(
lowerCamelCase__, num_layers=self.layers_per_block + 1, in_channels=lowerCamelCase__, out_channels=lowerCamelCase__, prev_output_channel=lowerCamelCase__, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=lowerCamelCase__, resnet_groups=lowerCamelCase__, attention_head_dim=lowerCamelCase__, temb_channels=lowerCamelCase__, resnet_time_scale_shift=lowerCamelCase__, )
self.up_blocks.append(lowerCamelCase__ )
A : Optional[int] = output_channel
# out
if norm_type == "spatial":
A : List[str] = SpatialNorm(block_out_channels[0], lowerCamelCase__ )
else:
A : Dict = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=lowerCamelCase__, eps=1e-6 )
A : List[Any] = nn.SiLU()
A : List[str] = nn.Convad(block_out_channels[0], lowerCamelCase__, 3, padding=1 )
A : Optional[int] = False
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=None ):
A : List[Any] = z
A : Union[str, Any] = self.conv_in(lowerCamelCase__ )
A : str = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase__ ):
def custom_forward(*lowerCamelCase__ ):
return module(*lowerCamelCase__ )
return custom_forward
if is_torch_version(""">=""", """1.11.0""" ):
# middle
A : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ), lowerCamelCase__, lowerCamelCase__, use_reentrant=lowerCamelCase__ )
A : str = sample.to(lowerCamelCase__ )
# up
for up_block in self.up_blocks:
A : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase__ ), lowerCamelCase__, lowerCamelCase__, use_reentrant=lowerCamelCase__ )
else:
# middle
A : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ), lowerCamelCase__, lowerCamelCase__ )
A : Optional[int] = sample.to(lowerCamelCase__ )
# up
for up_block in self.up_blocks:
A : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase__ ), lowerCamelCase__, lowerCamelCase__ )
else:
# middle
A : Union[str, Any] = self.mid_block(lowerCamelCase__, lowerCamelCase__ )
A : str = sample.to(lowerCamelCase__ )
# up
for up_block in self.up_blocks:
A : str = up_block(lowerCamelCase__, lowerCamelCase__ )
# post-process
if latent_embeds is None:
A : int = self.conv_norm_out(lowerCamelCase__ )
else:
A : Optional[int] = self.conv_norm_out(lowerCamelCase__, lowerCamelCase__ )
A : Optional[Any] = self.conv_act(lowerCamelCase__ )
A : Dict = self.conv_out(lowerCamelCase__ )
return sample
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=None, lowerCamelCase__="random", lowerCamelCase__=False, lowerCamelCase__=True ):
super().__init__()
A : List[str] = n_e
A : str = vq_embed_dim
A : Any = beta
A : Optional[Any] = legacy
A : Optional[int] = nn.Embedding(self.n_e, self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e )
A : List[str] = remap
if self.remap is not None:
self.register_buffer("""used""", torch.tensor(np.load(self.remap ) ) )
A : Tuple = self.used.shape[0]
A : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A : str = self.re_embed
A : Union[str, Any] = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A : List[Any] = n_e
A : Tuple = sane_index_shape
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Tuple = inds.shape
assert len(lowerCamelCase__ ) > 1
A : Dict = inds.reshape(ishape[0], -1 )
A : str = self.used.to(lowerCamelCase__ )
A : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
A : int = match.argmax(-1 )
A : Union[str, Any] = match.sum(2 ) < 1
if self.unknown_index == "random":
A : Union[str, Any] = torch.randint(0, self.re_embed, size=new[unknown].shape ).to(device=new.device )
else:
A : str = self.unknown_index
return new.reshape(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Optional[Any] = inds.shape
assert len(lowerCamelCase__ ) > 1
A : str = inds.reshape(ishape[0], -1 )
A : Tuple = self.used.to(lowerCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A : Any = 0 # simply set to zero
A : str = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, lowerCamelCase__ )
return back.reshape(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
# reshape z -> (batch, height, width, channel) and flatten
A : Tuple = z.permute(0, 2, 3, 1 ).contiguous()
A : Dict = z.view(-1, self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A : str = torch.argmin(torch.cdist(lowerCamelCase__, self.embedding.weight ), dim=1 )
A : List[str] = self.embedding(lowerCamelCase__ ).view(z.shape )
A : Union[str, Any] = None
A : Optional[int] = None
# compute loss for embedding
if not self.legacy:
A : str = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
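# (straight-through estimator: the forward pass uses the quantized z_q, while gradients flow back to z unchanged)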
A : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
A : Optional[Any] = z_q.permute(0, 3, 1, 2 ).contiguous()
if self.remap is not None:
A : Optional[int] = min_encoding_indices.reshape(z.shape[0], -1 ) # add batch axis
A : Optional[int] = self.remap_to_used(lowerCamelCase__ )
A : List[Any] = min_encoding_indices.reshape(-1, 1 ) # flatten
if self.sane_index_shape:
A : Union[str, Any] = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A : List[Any] = indices.reshape(shape[0], -1 ) # add batch axis
A : Optional[Any] = self.unmap_to_all(lowerCamelCase__ )
A : str = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A : str = self.embedding(lowerCamelCase__ )
if shape is not None:
A : int = z_q.view(lowerCamelCase__ )
# reshape back to match original input shape
A : str = z_q.permute(0, 3, 1, 2 ).contiguous()
return z_q
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=False ):
A : Union[str, Any] = parameters
A , A : List[Any] = torch.chunk(lowerCamelCase__, 2, dim=1 )
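# clamp the log-variance to a numerically safe range before exponentiating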
A : Any = torch.clamp(self.logvar, -30.0, 20.0 )
A : Tuple = deterministic
A : str = torch.exp(0.5 * self.logvar )
A : List[Any] = torch.exp(self.logvar )
if self.deterministic:
A : int = torch.zeros_like(
self.mean, device=self.parameters.device, dtype=self.parameters.dtype )
def _lowerCAmelCase ( self, lowerCamelCase__ = None ):
# make sure sample is on the same device as the parameters and has same dtype
A : str = randn_tensor(
self.mean.shape, generator=lowerCamelCase__, device=self.parameters.device, dtype=self.parameters.dtype )
A : Optional[int] = self.mean + self.std * sample
return x
def _lowerCAmelCase ( self, lowerCamelCase__=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean, 2 ) + self.var - 1.0 - self.logvar, dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean, 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar, dim=[1, 2, 3], )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
A : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2 ) / self.var, dim=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
return self.mean
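# Hedged sketch of the reparameterization trick implemented above (shapes are illustrative):
# given encoder moments of shape (B, 2*C, H, W), the sampling method draws z = mean + std * eps
# with eps ~ N(0, I), so the draw stays differentiable with respect to mean and logvar.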
| 662 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = bnb_quantization_config.load_in_abit
A : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
A : Any = []
# custom device map
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1:
A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A : int = get_keys_to_not_convert(_lowerCAmelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowerCAmelCase )
A : Optional[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A : Dict = []
A : Tuple = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowerCAmelCase )
# compatibility with peft
A : Union[str, Any] = load_in_abit
A : Tuple = load_in_abit
A : List[str] = get_parameter_device(_lowerCAmelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
# convert param to the right dtype
A : Tuple = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowerCAmelCase ):
param.to(_lowerCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
""" We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A : str = replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
A : Optional[Any] = get_quantized_model_device_map(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A : Tuple = True
A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[int]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A : Optional[int] = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized. """ """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A : Tuple = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A : Any = {}
A : List[str] = special_dtypes
A : Any = no_split_module_classes
A : Union[str, Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A : Tuple = get_balanced_memory(
_lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , )
A : int = max_memory
A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# check if don't have any quantized module on the cpu
A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A : Optional[int] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
A : Optional[Any] = []
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> int:
"""simple docstring"""
A : Optional[int] = False
for name, module in model.named_children():
if current_key_name is None:
A : int = []
current_key_name.append(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A : Dict = """.""".join(_lowerCAmelCase )
A : Optional[Any] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A : Dict = False
break
if proceed:
# Load bnb module with empty weight and replace the ``nn.Linear`` module
if bnb_quantization_config.load_in_abit:
A : Optional[Any] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
A : Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A : Any = module.weight.data
if module.bias is not None:
A : Any = module.bias.data
bnb_module.requires_grad_(_lowerCAmelCase )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Dict = True
if len(list(module.children() ) ) > 0:
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __UpperCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
with init_empty_weights():
A : Tuple = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager
A : Optional[int] = find_tied_parameters(_lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A : Optional[int] = sum(_lowerCAmelCase , [] )
A : Tuple = len(_lowerCAmelCase ) > 0
# Check if it is a base model
A : List[str] = False
if hasattr(_lowerCAmelCase , """base_model_prefix""" ):
A : Optional[Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A : str = list(model.named_children() )
A : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
A : int = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
A : Optional[Any] = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase )
# remove ".weight" from the keys
A : Union[str, Any] = [""".weight""", """.bias"""]
A : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A : List[str] = name.replace(_lowerCAmelCase , """""" )
filtered_module_names.append(_lowerCAmelCase )
return filtered_module_names
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
for m in model.modules():
if isinstance(_lowerCAmelCase , bnb.nn.Linearabit ):
return True
return False
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
return next(parameter.parameters() ).device
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase )
A : Tuple = param_name
A : Union[str, Any] = model
if "." in tensor_name:
A : int = tensor_name.split(""".""" )
for split in splits[:-1]:
A : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
A : Optional[Any] = new_module
A : List[str] = splits[-1]
# offload weights
A : Optional[int] = False
offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase , )
else:
offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
offload_weight(_lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase )
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , """meta""" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
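# Hedged usage sketch (model and paths are illustrative; mirrors accelerate's documented API):
# from accelerate import init_empty_weights
# from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
# with init_empty_weights():
# empty_model = MyModel() # hypothetical model class
# cfg = BnbQuantizationConfig(load_in_8bit=True)
# model = load_and_quantize_model(empty_model, cfg, weights_location="ckpt/", device_map="auto")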
| 662 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Optional[Any] = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = "ctrl"
__lowerCamelCase : List[str] = ["past_key_values"]
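# Maps canonical config attribute names onto CTRL's historical parameter names (n_positions, n_embd, ...).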
__lowerCamelCase : Any = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, lowerCamelCase__=24_6534, lowerCamelCase__=256, lowerCamelCase__=1280, lowerCamelCase__=8192, lowerCamelCase__=48, lowerCamelCase__=16, lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=1e-6, lowerCamelCase__=0.02, lowerCamelCase__=True, **lowerCamelCase__, ):
A : List[Any] = vocab_size
A : List[str] = n_positions
A : Union[str, Any] = n_embd
A : str = n_layer
A : List[Any] = n_head
A : Optional[int] = dff
A : str = resid_pdrop
A : Optional[Any] = embd_pdrop
A : int = layer_norm_epsilon
A : List[str] = initializer_range
A : Optional[int] = use_cache
super().__init__(**lowerCamelCase__ )
| 662 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
A : Tuple = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
A : Dict = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(_lowerCAmelCase )
DownloadCommand.register_subcommand(_lowerCAmelCase )
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
RunCommand.register_subcommand(_lowerCAmelCase )
ServeCommand.register_subcommand(_lowerCAmelCase )
UserCommands.register_subcommand(_lowerCAmelCase )
AddNewModelCommand.register_subcommand(_lowerCAmelCase )
AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase )
LfsCommands.register_subcommand(_lowerCAmelCase )
PTtoTFCommand.register_subcommand(_lowerCAmelCase )
# Let's go
A : Tuple = parser.parse_args()
if not hasattr(_lowerCAmelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
A : Any = args.func(_lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 662 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:int = """Hello, World!"""
SCREAMING_SNAKE_CASE_:List[Any] = """en_XX"""
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """Copy/paste/tweak the fairseq X-MOD weights into our X-MOD structure."""
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
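# Example invocation (paths are illustrative placeholders, not from the original script):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/xmod.base/model.pt \
#       --pytorch_dump_folder_path ./xmod-base-converted \
#       --classification_head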
| 662 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
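# Lazy-import behaviour in practice: `import transformers.models.blenderbot`
# only materializes this stub; the first attribute access, e.g.
#   from transformers.models.blenderbot import BlenderbotForConditionalGeneration
# triggers _LazyModule.__getattr__, which imports modeling_blenderbot on demand.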
| 662 | 1 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
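# How the patches line up (decorators apply bottom-up, so `file` is the
# builtins.open mock and `sock` the socket.socket mock):
#   sock.return_value         -> the server socket send_file creates once
#   sock.return_value.accept  -> hands back our (conn, <address>) pair
#   read.side_effect          -> yields 1, then None, ending the send loop
#     after exactly one conn.send(), matching conn.send.assert_called_once().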
| 662 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily find the minimum change for `value` from the given denominations."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
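# Worked example with the default Indian denominations:
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
# Caveat: the greedy strategy is only optimal for canonical coin systems;
# for denominations [1, 3, 4] and value 6 it returns [4, 1, 1] although
# [3, 3] uses fewer coins.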
| 662 | 1 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
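# Instantiating the shim still works but now warns, e.g.:
#   extractor = YolosFeatureExtractor()  # FutureWarning: ... use YolosImageProcessor instead.
# All behaviour is inherited unchanged from YolosImageProcessor.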
| 662 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
    tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
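# Loading the saved artifact afterwards (sketch; use the local path, or the Hub
# id "stas/tiny-wmt19-en-ru" once uploaded):
#   tok = FSMTTokenizer.from_pretrained(mname_tiny)
#   mdl = FSMTForConditionalGeneration.from_pretrained(mname_tiny)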
| 662 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Perceiver model."""

    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256, d_latents=1280, d_model=768, num_blocks=1,
        num_self_attends_per_block=26, num_self_attention_heads=8,
        num_cross_attention_heads=8, qk_channels=None, v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1, cross_attention_widening_factor=1,
        hidden_act="gelu", attention_probs_dropout_prob=0.1,
        initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True,
        vocab_size=262, max_position_embeddings=2048, image_size=56,
        train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920,
        samples_per_patch=16, output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
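# Minimal usage sketch for the ONNX config defined below (assumed flow,
# mirroring transformers.onnx.export):
#   config = PerceiverConfig()
#   onnx_config = PerceiverOnnxConfig(config)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   # dummy has keys "inputs" and "attention_mask", matching onnx_config.inputs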
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified

        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 662 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """Copy/paste/tweak the fairseq X-MOD weights into our X-MOD structure."""
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 662 | 1 |
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Shortest path on a binary grid (1 = traversable, 0 = blocked)."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
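# Worked example (1 = free cell, 0 = wall), values verified by hand:
#   grid = np.array([[1, 1, 1],
#                    [0, 0, 1],
#                    [1, 1, 1]])
#   dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
#   -> (6.0, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)])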
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
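# Running just this suite (hypothetical invocation; the path depends on your checkout):
#   python -m pytest tests/models/blip/test_processor_blip.py -q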
| 662 | 1 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted runs input_list[low:mid] and input_list[mid:high + 1]."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
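# Worked example: iter_merge_sort([4, 3, 2, 1])
#   p=2 merges width-2 runs:  [3, 4, 1, 2]
#   final merge (p*2 >= n):   [1, 2, 3, 4]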
| 662 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
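# Note (assumed data source): load_hf_numpy fetches the .npy fixtures from the
# diffusers test-fixtures dataset on the Hub; get_file_format() encodes the RNG
# seed and tensor shape into each file name.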
| 662 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 662 |
from typing import Any
import numpy as np
def __UpperCamelCase ( _lowerCAmelCase ) -> bool:
"""simple docstring"""
return np.array_equal(_lowerCAmelCase , matrix.conjugate().T )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Any = v.conjugate().T
A : List[Any] = v_star.dot(_lowerCAmelCase )
assert isinstance(_lowerCAmelCase , np.ndarray )
return (v_star_dot.dot(_lowerCAmelCase )) / (v_star.dot(_lowerCAmelCase ))
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
A : Any = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
A : str = np.array([[1], [2], [3]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) )
A : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 662 | 1 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ), reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""], )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # CrossEntropyLoss is in nats (natural log), so torch.exp recovers the perplexity
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
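# Definition implemented above, for a tokenized sequence x_1 .. x_N:
#   PPL(x) = exp(-(1/N) * sum_{i=1..N} log p(x_i | x_1 .. x_{i-1}))
# i.e. torch.exp of the per-sequence mean CrossEntropyLoss over unmasked tokens.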
| 662 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix as L @ U with unit-diagonal L."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
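# Worked example (Doolittle: L has a unit diagonal):
#   lower_upper_decomposition(np.array([[2, 1], [4, 5]]))
#   -> L = [[1, 0], [2, 1]],  U = [[2, 1], [0, 3]]
#   since [[1, 0], [2, 1]] @ [[2, 1], [0, 3]] == [[2, 1], [4, 5]].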
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 1 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
        """Benchmark the NumPy and pure-Python implementations against each other."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10000, globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])", number=10000, globals=globals(),
            )
        )

    benchmark()
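# Sanity check: euclidean_distance([0, 0], [3, 4]) == 5.0, since sqrt(3**2 + 4**2) == 5.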
| 662 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
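# Worked example: a 480x640 input targeted at 384x384 with keep_aspect_ratio=True
# and multiple=32: scale_height = 0.8 deviates less from 1 than scale_width = 0.6,
# so both sides use 0.8, yielding (384, 512) -- each already a multiple of 32.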
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : int = size if size is not None else {"""height""": 384, """width""": 384}
A : str = get_size_dict(lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Optional[int] = size
A : Union[str, Any] = keep_aspect_ratio
A : int = ensure_multiple_of
A : Dict = resample
A : Optional[Any] = do_rescale
A : Any = rescale_factor
A : str = do_normalize
A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Dict = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
A : Optional[Any] = get_resize_output_image_size(
lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, )
return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A : str = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__ )
A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
A : Tuple = resample if resample is not None else self.resample
A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A : int = do_normalize if do_normalize is not None else self.do_normalize
A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A : Optional[int] = image_std if image_std is not None else self.image_std
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : str = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
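# Usage sketch (a hypothetical call; this class mirrors transformers' DPT image
# processor): processor(images=image, return_tensors="pt") returns a BatchFeature
# whose "pixel_values" entry has shape (batch, 3, height, width).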
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCamelCase__ ):
A : int = target_sizes.numpy()
A : Union[str, Any] = []
for idx in range(len(lowerCamelCase__ ) ):
A : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ )
A : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
A : List[str] = logits.argmax(dim=1 )
A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 662 | 1 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_4bit_bnb_available,
is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = bnb_quantization_config.load_in_8bit
A : int = bnb_quantization_config.load_in_4bit
if load_in_8bit and not is_8bit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_4bit and not is_4bit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
A : Any = []
# custom device map
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1:
A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A : int = get_keys_to_not_convert(_lowerCAmelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_4bit:
bnb_quantization_config.skip_modules.extend(_lowerCAmelCase )
A : Optional[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A : Dict = []
A : Tuple = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowerCAmelCase )
# compatibility with peft
A : Union[str, Any] = load_in_4bit
A : Tuple = load_in_8bit
A : List[str] = get_parameter_device(_lowerCAmelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
# convert param to the right dtype
A : Tuple = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowerCAmelCase ):
param.to(_lowerCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization. '''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A : str = replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
A : Optional[Any] = get_quantized_model_device_map(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A : Tuple = True
A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_8bit and offload , )
return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase )
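# Typical flow (a sketch; argument names follow accelerate's public API, while the
# config object `cfg` and the checkpoint path are hypothetical):
# with init_empty_weights():
#     model = MyModel(config)
# model = load_and_quantize_model(model, bnb_quantization_config=cfg,
#     weights_location="/path/to/checkpoint", device_map="auto")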
def get_quantized_model_device_map ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[int]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A : Optional[int] = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A : Tuple = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A : Any = {}
A : List[str] = special_dtypes
A : Any = no_split_module_classes
A : Union[str, Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A : Tuple = get_balanced_memory(
_lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , )
A : int = max_memory
A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# check if don't have any quantized module on the cpu
A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A : Optional[int] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_4bit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
A : Optional[Any] = []
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _replace_with_bnb_layers ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> int:
"""simple docstring"""
A : Optional[int] = False
for name, module in model.named_children():
if current_key_name is None:
A : int = []
current_key_name.append(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A : Dict = """.""".join(_lowerCAmelCase )
A : Optional[Any] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A : Dict = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_8bit:
A : Optional[Any] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_4bit:
A : Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A : Any = module.weight.data
if module.bias is not None:
A : Any = module.bias.data
bnb_module.requires_grad_(_lowerCAmelCase )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Dict = True
if len(list(module.children() ) ) > 0:
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert ( _lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
with init_empty_weights():
A : Tuple = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
A : Optional[int] = find_tied_parameters(_lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A : Optional[int] = sum(_lowerCAmelCase , [] )
A : Tuple = len(_lowerCAmelCase ) > 0
# Check if it is a base model
A : List[str] = False
if hasattr(_lowerCAmelCase , """base_model_prefix""" ):
A : Optional[Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A : str = list(model.named_children() )
A : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
A : int = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
A : Optional[Any] = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase )
# remove ".weight" from the keys
A : Union[str, Any] = [""".weight""", """.bias"""]
A : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A : List[str] = name.replace(_lowerCAmelCase , """""" )
filtered_module_names.append(_lowerCAmelCase )
return filtered_module_names
def has_4bit_bnb_layers ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
for m in model.modules():
if isinstance(_lowerCAmelCase , bnb.nn.Linearabit ):
return True
return False
def get_parameter_device ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
return next(parameter.parameters() ).device
def quantize_and_offload_8bit ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase )
A : Tuple = param_name
A : Union[str, Any] = model
if "." in tensor_name:
A : int = tensor_name.split(""".""" )
for split in splits[:-1]:
A : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
A : Optional[Any] = new_module
A : List[str] = splits[-1]
# offload weights
A : Optional[int] = False
offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase , )
else:
offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
offload_weight(_lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase )
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , """meta""" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
| 662 |
class SubArray:
'''simple docstring'''
def __init__( self, lowerCamelCase__ ):
# we need a list, not a string, so split the input on commas
self.array = lowerCamelCase__.split(""",""" )
def solve_sub_array( self ):
sum_value = [int(self.array[0] )] * len(self.array )
rear = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
sum_value[i] = max(int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
rear[i] = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
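# Example (a sketch): SubArray("1,2,-5,4,3").solve_sub_array() returns 7, since the
# Kadane-style scan above picks the best contiguous run [4, 3].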
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:int = input("""please input some numbers:""")
SCREAMING_SNAKE_CASE_:Dict = SubArray(whole_array)
SCREAMING_SNAKE_CASE_:Optional[int] = array.solve_sub_array()
print(("""the results is:""", re))
| 662 | 1 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
SCREAMING_SNAKE_CASE_:List[Any] = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _lowerCAmelCase ( cls ):
A : List[str] = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def _lowerCAmelCase ( cls ):
try:
delete_repo(token=cls._token, repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def _lowerCAmelCase ( self ):
A : str = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
A : List[str] = FlaxBertModel(lowerCamelCase__ )
model.push_to_hub("""test-model-flax""", use_auth_token=self._token )
A : Dict = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
A : int = flatten_dict(unfreeze(model.params ) )
A : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__, 1e-3, msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token, repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__, repo_id="""test-model-flax""", push_to_hub=lowerCamelCase__, use_auth_token=self._token )
A : Dict = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
A : Union[str, Any] = flatten_dict(unfreeze(model.params ) )
A : int = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A : Optional[int] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__, 1e-3, msg=f'''{key} not identical''' )
def _lowerCAmelCase ( self ):
A : str = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
A : str = FlaxBertModel(lowerCamelCase__ )
model.push_to_hub("""valid_org/test-model-flax-org""", use_auth_token=self._token )
A : Any = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
A : Optional[Any] = flatten_dict(unfreeze(model.params ) )
A : Any = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A : Dict = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__, 1e-3, msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token, repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowerCamelCase__, repo_id="""valid_org/test-model-flax-org""", push_to_hub=lowerCamelCase__, use_auth_token=self._token )
A : Optional[int] = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
A : Any = flatten_dict(unfreeze(model.params ) )
A : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A : Optional[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__, 1e-3, msg=f'''{key} not identical''' )
def check_models_equal ( modela , modelb ) -> Union[str, Any]:
"""simple docstring"""
A : int = True
A : str = flatten_dict(modela.params )
A : int = flatten_dict(modelb.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1e-4:
A : Optional[Any] = False
return models_are_equal
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : int = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
A : Any = FlaxBertModel(lowerCamelCase__ )
A : Tuple = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase__, lowerCamelCase__ ) )
with self.assertRaises(lowerCamelCase__ ):
A : Tuple = FlaxBertModel.from_pretrained(lowerCamelCase__ )
A : Union[str, Any] = FlaxBertModel.from_pretrained(lowerCamelCase__, subfolder=lowerCamelCase__ )
self.assertTrue(check_models_equal(lowerCamelCase__, lowerCamelCase__ ) )
def _lowerCAmelCase ( self ):
A : Union[str, Any] = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
A : List[str] = FlaxBertModel(lowerCamelCase__ )
A : Dict = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase__, lowerCamelCase__ ), max_shard_size="""10KB""" )
with self.assertRaises(lowerCamelCase__ ):
A : int = FlaxBertModel.from_pretrained(lowerCamelCase__ )
A : Optional[Any] = FlaxBertModel.from_pretrained(lowerCamelCase__, subfolder=lowerCamelCase__ )
self.assertTrue(check_models_equal(lowerCamelCase__, lowerCamelCase__ ) )
def _lowerCAmelCase ( self ):
A : Tuple = """bert"""
A : List[str] = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(lowerCamelCase__ ):
A : Union[str, Any] = FlaxBertModel.from_pretrained(lowerCamelCase__ )
A : int = FlaxBertModel.from_pretrained(lowerCamelCase__, subfolder=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = """bert"""
A : Dict = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(lowerCamelCase__ ):
A : Optional[int] = FlaxBertModel.from_pretrained(lowerCamelCase__ )
A : int = FlaxBertModel.from_pretrained(lowerCamelCase__, subfolder=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
| 662 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[Any] = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = "bit"
__lowerCamelCase : Union[str, Any] = ["preactivation", "bottleneck"]
__lowerCamelCase : Union[str, Any] = ["SAME", "VALID"]
def __init__( self, lowerCamelCase__=3, lowerCamelCase__=64, lowerCamelCase__=[256, 512, 1024, 2048], lowerCamelCase__=[3, 4, 6, 3], lowerCamelCase__="preactivation", lowerCamelCase__="relu", lowerCamelCase__=None, lowerCamelCase__=32, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=1, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A : List[Any] = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
A : Dict = num_channels
A : List[Any] = embedding_size
A : Optional[Any] = hidden_sizes
A : str = depths
A : str = layer_type
A : Union[str, Any] = hidden_act
A : Any = global_padding
A : Optional[int] = num_groups
A : Dict = drop_path_rate
A : List[Any] = embedding_dynamic_padding
A : List[Any] = output_stride
A : Union[str, Any] = width_factor
A : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )]
A , A : Any = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
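# Usage sketch (this config mirrors transformers' BitConfig; the argument values are
# illustrative): SCREAMING_SNAKE_CASE__(layer_type="bottleneck", global_padding="SAME", out_features=["stage4"])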
| 662 | 1 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ ( datasets.BuilderConfig ):
'''simple docstring'''
__lowerCamelCase : int = 1_0000
__lowerCamelCase : Optional[List[str]] = None
__lowerCamelCase : Optional[datasets.Features] = None
class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
__lowerCamelCase : Dict = ParquetConfig
def _lowerCAmelCase ( self ):
return datasets.DatasetInfo(features=self.config.features )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
A : Union[str, Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCamelCase__, (str, list, tuple) ):
A : List[str] = data_files
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : Any = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A : Union[str, Any] = [dl_manager.iter_files(lowerCamelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""files""": files} )]
A : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A : List[str] = [dl_manager.iter_files(lowerCamelCase__ ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(lowerCamelCase__ ):
with open(lowerCamelCase__, """rb""" ) as f:
A : int = datasets.Features.from_arrow_schema(pq.read_schema(lowerCamelCase__ ) )
break
splits.append(datasets.SplitGenerator(name=lowerCamelCase__, gen_kwargs={"""files""": files} ) )
return splits
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
A : Tuple = table_cast(lowerCamelCase__, self.info.features.arrow_schema )
return pa_table
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Union[str, Any] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCamelCase__ ) ):
with open(lowerCamelCase__, """rb""" ) as f:
A : Union[str, Any] = pq.ParquetFile(lowerCamelCase__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns ) ):
A : List[Any] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'''{file_idx}_{batch_idx}''', self._cast_table(lowerCamelCase__ )
except ValueError as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(lowerCamelCase__ )}: {e}''' )
raise
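# Usage sketch (hypothetical file names; this builder backs datasets' parquet loader):
# ds = datasets.load_dataset("parquet", data_files={"train": "train-*.parquet"}, columns=["text"])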
| 662 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=50, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=None, ):
A : List[str] = parent
A : List[str] = batch_size
A : Optional[int] = seq_length
A : Optional[int] = is_training
A : Tuple = use_input_mask
A : Optional[Any] = vocab_size
A : str = hidden_size
A : Any = num_hidden_layers
A : List[Any] = num_attention_heads
A : Optional[int] = intermediate_size
A : int = hidden_act
A : Dict = hidden_dropout_prob
A : Optional[Any] = attention_probs_dropout_prob
A : List[Any] = max_position_embeddings
A : int = initializer_range
A : Tuple = use_labels
A : List[str] = scope
def _lowerCAmelCase ( self ):
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : int = None
if self.use_input_mask:
A : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : List[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCAmelCase ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, )
def _lowerCAmelCase ( self ):
A , A , A , A : List[Any] = self.prepare_config_and_inputs()
A : Any = True
A : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : str = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )
A : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : List[str] = True
A : Union[str, Any] = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, )
A : Optional[Any] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : Union[str, Any] = True
A : Optional[int] = True
A : Optional[int] = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
# first forward pass
A : int = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__, )
A : List[str] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size )
A : int = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
A : List[str] = torch.cat([input_ids, next_tokens], dim=-1 )
A : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1 )
A : List[str] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
# select random slice
A : Any = ids_tensor((1,), output_from_past.shape[-1] ).item()
A : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
A : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, ):
A : Optional[int] = BertGenerationDecoder(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : List[str] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self ):
A , A , A , A : str = self.prepare_config_and_inputs()
A : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__lowerCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self ):
A : Any = BertGenerationEncoderTester(self )
A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A , A , A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
A : Any = """bert"""
self.model_tester.create_and_check_model(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
# This regression test was failing with PyTorch < 1.3
A , A , A , A , A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
A : int = None
self.model_tester.create_and_check_model_as_decoder(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, )
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
A : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(lowerCamelCase__ )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : Optional[int] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Union[str, Any] = model(lowerCamelCase__ )[0]
A : List[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Tuple = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Dict = model(lowerCamelCase__ )[0]
A : List[str] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Optional[Any] = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
| 662 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Any = tempfile.mkdtemp()
A : List[str] = BlipImageProcessor()
A : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
A : str = BlipProcessor(lowerCamelCase__, lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).tokenizer
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).image_processor
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
A : Any = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
A : int = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Any = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
A : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase__, padding_value=1.0 )
A : Dict = BlipProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : str = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : int = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_image_processor()
A : int = self.get_tokenizer()
A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = """lower newer"""
A : List[Any] = processor(text=lowerCamelCase__ )
A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : Union[str, Any] = self.prepare_image_inputs()
A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Optional[int] = processor.batch_decode(lowerCamelCase__ )
A : Dict = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : int = self.get_tokenizer()
A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : List[str] = self.prepare_image_inputs()
A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 662 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : str = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384}
A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Dict = size
# Default value set here for backwards compatibility where the value in config is None
A : Dict = crop_pct if crop_pct is not None else 224 / 256
A : Optional[int] = resample
A : List[str] = do_rescale
A : Tuple = rescale_factor
A : Optional[int] = do_normalize
A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
A : List[str] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A : int = int(shortest_edge / crop_pct )
A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
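# Worked example: size={"shortest_edge": 224} with crop_pct=224/256 resizes the
# shortest edge to int(224 / 0.875) = 256 and then center-crops to 224x224; for
# size >= 384 the image is warped straight to (shortest_edge, shortest_edge).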
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Dict = do_resize if do_resize is not None else self.do_resize
A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
A : str = resample if resample is not None else self.resample
A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Dict = do_normalize if do_normalize is not None else self.do_normalize
A : List[str] = image_mean if image_mean is not None else self.image_mean
A : Optional[Any] = image_std if image_std is not None else self.image_std
A : Optional[Any] = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
| 662 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Union[str, Any] = """https://openaipublic.azureedge.net/jukebox/models/"""
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key ( _lowerCAmelCase ) -> Optional[Any]:
"""simple docstring"""
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
A : List[Any] = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
A : Optional[int] = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
A : int = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
A : Any = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
A : Tuple = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
if "prime_prior" in key:
A : Tuple = key.replace("""prime_prior""" , """encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
A : str = key.replace(""".emb.""" , """.""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""" )
if "x_emb.emb." in key:
A : Optional[int] = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
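# e.g. replace_key("priors.0.prime_state_ln.weight") -> "priors.0.encoder.final_layer_norm.weight"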
def fix_jukebox_keys ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
A : Union[str, Any] = {}
import re
A : Optional[Any] = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
A : Union[str, Any] = re.compile(
R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
A : List[str] = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
A : List[str] = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
A : int = re.compile(
R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
A : Dict = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
A : Union[str, Any] = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
A : List[str] = re.compile(
R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
A : Dict = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCAmelCase ):
A : Any = re_encoder_block_conv_in.match(_lowerCAmelCase )
A : Optional[int] = regex_match.groups()
A : List[str] = int(groups[2] ) * 2 + int(groups[3] )
A : List[str] = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
A : List[Any] = re_encoder_block_conv_in.sub(_lowerCAmelCase , _lowerCAmelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCAmelCase ):
A : List[str] = re_encoder_block_resnet.match(_lowerCAmelCase )
A : Optional[Any] = regex_match.groups()
A : Tuple = int(groups[2] ) * 2 + int(groups[3] )
A : List[Any] = {"""1""": 1, """3""": 2}[groups[-2]]
A : Any = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
A : List[str] = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
A : Tuple = prefix + resnet_block
A : Tuple = re_encoder_block_resnet.sub(_lowerCAmelCase , _lowerCAmelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCAmelCase ):
A : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCAmelCase )
A : List[str] = regex_match.groups()
A : Dict = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
A : str = re_encoder_block_proj_out.sub(_lowerCAmelCase , _lowerCAmelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCAmelCase ):
A : int = re_decoder_block_conv_out.match(_lowerCAmelCase )
A : Dict = regex_match.groups()
A : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
A : Union[str, Any] = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
A : int = re_decoder_block_conv_out.sub(_lowerCAmelCase , _lowerCAmelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCAmelCase ):
A : Optional[int] = re_decoder_block_resnet.match(_lowerCAmelCase )
A : List[str] = regex_match.groups()
A : List[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
A : List[str] = {"""1""": 1, """3""": 2}[groups[-2]]
A : Tuple = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
A : int = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
A : Dict = prefix + resnet_block
A : Union[str, Any] = re_decoder_block_resnet.sub(_lowerCAmelCase , _lowerCAmelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCAmelCase ):
A : Tuple = re_decoder_block_proj_in.match(_lowerCAmelCase )
A : int = regex_match.groups()
A : str = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
A : int = re_decoder_block_proj_in.sub(_lowerCAmelCase , _lowerCAmelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCAmelCase ):
A : str = re_prior_cond_conv_out.match(_lowerCAmelCase )
A : Dict = regex_match.groups()
A : List[Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
A : List[Any] = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
A : str = re_prior_cond_conv_out.sub(_lowerCAmelCase , _lowerCAmelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCAmelCase ):
A : Optional[int] = re_prior_cond_resnet.match(_lowerCAmelCase )
A : Optional[Any] = regex_match.groups()
A : List[str] = int(groups[1] ) * 2 + int(groups[2] ) - 2
A : Tuple = {"""1""": 1, """3""": 2}[groups[-2]]
A : Dict = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
A : List[str] = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
A : str = prefix + resnet_block
A : str = re_prior_cond_resnet.sub(_lowerCAmelCase , _lowerCAmelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCAmelCase ):
A : Optional[int] = re_prior_cond_proj_in.match(_lowerCAmelCase )
A : Optional[Any] = regex_match.groups()
A : List[str] = f'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
A : Optional[int] = re_prior_cond_proj_in.sub(_lowerCAmelCase , _lowerCAmelCase )
# keep original key
else:
A : List[str] = original_key
A : Dict = replace_key(_lowerCAmelCase )
if key is None or f'''{key_prefix}.{key}''' not in model_state_dict:
print(f'''failed converting {original_key} to {key}, does not match''' )
# handle mismatched shape
elif value.shape != model_state_dict[f'''{key_prefix}.{key}'''].shape:
A : Union[str, Any] = model_state_dict[f'''{key_prefix}.{key}''']
print(f'''{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match''' )
A : Optional[Any] = original_key
A : Union[str, Any] = original_key
A : Optional[Any] = value
return new_dict
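# Sketch of one regex rename (hypothetical key, matched by re_encoder_block_conv_in):
# "encoders.0.level_blocks.0.model.2.1.weight" yields groups ("0", "0", "2", "1", "weight"),
# block_index = 2 * 2 + 1 = 5, giving "encoders.0.level_blocks.0.downsample_block.5.weight"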
@torch.no_grad()
def __UpperCamelCase ( _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Tuple:
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' ):
A : Tuple = requests.get(f'''{PREFIX}{file}''' , allow_redirects=_lowerCAmelCase )
os.makedirs(f'''{pytorch_dump_folder_path}/''' , exist_ok=_lowerCAmelCase )
open(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' , """wb""" ).write(r.content )
A : List[str] = MODEL_MAPPING[model_name.split("""/""" )[-1]]
A : Union[str, Any] = JukeboxConfig.from_pretrained(_lowerCAmelCase )
A : Optional[int] = JukeboxModel(_lowerCAmelCase )
A : Optional[int] = []
A : Any = {}
for i, dict_name in enumerate(_lowerCAmelCase ):
A : Tuple = torch.load(f'''{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}''' )["""model"""]
A : int = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
A : str = old_dic[k]
elif k.endswith(""".w""" ):
A : Any = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
A : List[Any] = old_dic[k]
else:
A : Union[str, Any] = old_dic[k]
A : int = """vqvae""" if i == 0 else f'''priors.{3 - i}'''
A : List[Any] = fix_jukebox_keys(_lowerCAmelCase , model.state_dict() , _lowerCAmelCase , _lowerCAmelCase )
weight_dict.append(_lowerCAmelCase )
A : List[str] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCAmelCase )
for i in range(len(_lowerCAmelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
with open(f'''{pytorch_dump_folder_path}/mapping.json''' , """w""" ) as txtfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
return weight_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
SCREAMING_SNAKE_CASE_:Tuple = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 662 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
A : Dict = """backbone.""" if is_semantic else """"""
A : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
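# Example pair appended for layer 0 of a non-semantic checkpoint (prefix is empty):
# ("blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight")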
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
A : Dict = """backbone.""" if is_semantic else """"""
# queries, keys and values
A : Union[str, Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
A : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
A : Optional[int] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
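# the fused qkv projection stacks query, key and value row-wise; the three
# hidden_size-wide slices below recover them (note BEiT carries no key bias)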
A : int = in_proj_weight[
: config.hidden_size, :
]
A : Any = q_bias
A : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A : Tuple = in_proj_weight[
-config.hidden_size :, :
]
A : Union[str, Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A : str = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
A : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
A : Dict = gamma_a
A : Dict = gamma_a
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A : List[str] = dct.pop(_lowerCAmelCase )
A : Optional[Any] = val
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
A : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> str:
"""simple docstring"""
A : Dict = False if """rvlcdip""" in checkpoint_url else True
A : Union[str, Any] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A : Dict = 1024
A : List[Any] = 4096
A : int = 24
A : int = 16
# labels
if "rvlcdip" in checkpoint_url:
A : List[Any] = 16
A : List[Any] = """huggingface/label-files"""
A : int = """rvlcdip-id2label.json"""
A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
A : List[str] = {int(k ): v for k, v in idalabel.items()}
A : int = idalabel
A : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
A : str = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase )
# load HuggingFace model
A : Any = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase )
model.eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
A : Any = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase )
A : int = prepare_img()
A : Tuple = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
A : str = encoding["""pixel_values"""]
A : Tuple = model(_lowerCAmelCase )
A : Optional[int] = outputs.logits
# verify logits
A : Tuple = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
if has_lm_head:
A : Any = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
A : List[Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 662 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[Any] = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = "bit"
__lowerCamelCase : Union[str, Any] = ["preactivation", "bottleneck"]
__lowerCamelCase : Union[str, Any] = ["SAME", "VALID"]
def __init__( self, lowerCamelCase__=3, lowerCamelCase__=64, lowerCamelCase__=[256, 512, 1024, 2048], lowerCamelCase__=[3, 4, 6, 3], lowerCamelCase__="preactivation", lowerCamelCase__="relu", lowerCamelCase__=None, lowerCamelCase__=32, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=1, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A : List[Any] = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
A : Dict = num_channels
A : List[Any] = embedding_size
A : Optional[Any] = hidden_sizes
A : str = depths
A : str = layer_type
A : Union[str, Any] = hidden_act
A : Any = global_padding
A : Optional[int] = num_groups
A : Dict = drop_path_rate
A : List[Any] = embedding_dynamic_padding
A : List[Any] = output_stride
A : Union[str, Any] = width_factor
A : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )]
A , A : Any = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
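# Minimal usage sketch (assuming the original class name BitConfig):
# configuration = BitConfig(layer_type="preactivation", global_padding="SAME")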
| 662 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""", lowerCamelCase__, )
super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
| 662 | 1 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
SCREAMING_SNAKE_CASE_:Dict = """
import os
"""
SCREAMING_SNAKE_CASE_:Any = """
def foo():
import os
return False
"""
SCREAMING_SNAKE_CASE_:str = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
SCREAMING_SNAKE_CASE_:str = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
SCREAMING_SNAKE_CASE_:Union[str, Any] = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
SCREAMING_SNAKE_CASE_:Any = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
SCREAMING_SNAKE_CASE_:Tuple = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
SCREAMING_SNAKE_CASE_:Tuple = """
import os
try:
import bar
except:
raise ValueError()
"""
SCREAMING_SNAKE_CASE_:Optional[int] = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
SCREAMING_SNAKE_CASE_:List[str] = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
SCREAMING_SNAKE_CASE_:List[str] = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" , _lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
"""simple docstring"""
A : Optional[Any] = os.path.join(_lowerCAmelCase , """test_file.py""" )
with open(_lowerCAmelCase , """w""" ) as _tmp_file:
_tmp_file.write(_lowerCAmelCase )
A : List[Any] = get_imports(_lowerCAmelCase )
assert parsed_imports == ["os"]
| 662 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(
lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, )
A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths}
A : str = Text(
cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, )
def _lowerCAmelCase ( self ):
# Build iterable dataset
if self.streaming:
A : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A : List[str] = None
A : Dict = None
A : Tuple = None
A : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, )
A : List[str] = self.builder.as_dataset(
split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory )
return dataset
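# Hypothetical usage (assuming the original class name TextDatasetReader):
# dataset = TextDatasetReader("data.txt", split="train").read()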
| 662 | 1 |
import warnings
from functools import wraps
from typing import Callable
def __UpperCamelCase ( _lowerCAmelCase ) -> Callable:
"""simple docstring"""
@wraps(_lowerCAmelCase )
def _inner_fn(*_lowerCAmelCase , **_lowerCAmelCase ):
warnings.warn(
(f'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , _lowerCAmelCase , )
return fn(*_lowerCAmelCase , **_lowerCAmelCase )
return _inner_fn
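# Hypothetical usage (assuming the decorator's original name `experimental`):
# @experimental
# def new_feature(): ...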
| 662 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 662 | 1 |
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : int = "SpeechT5FeatureExtractor"
__lowerCamelCase : List[str] = "SpeechT5Tokenizer"
def __init__( self, lowerCamelCase__, lowerCamelCase__ ):
super().__init__(lowerCamelCase__, lowerCamelCase__ )
def __call__( self, *lowerCamelCase__, **lowerCamelCase__ ):
A : Any = kwargs.pop("""audio""", lowerCamelCase__ )
A : int = kwargs.pop("""text""", lowerCamelCase__ )
A : Union[str, Any] = kwargs.pop("""text_target""", lowerCamelCase__ )
A : Union[str, Any] = kwargs.pop("""audio_target""", lowerCamelCase__ )
A : List[str] = kwargs.pop("""sampling_rate""", lowerCamelCase__ )
if audio is not None and text is not None:
raise ValueError(
"""Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
if audio_target is not None and text_target is not None:
raise ValueError(
"""Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"""You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )
if audio is not None:
A : Dict = self.feature_extractor(lowerCamelCase__, *lowerCamelCase__, sampling_rate=lowerCamelCase__, **lowerCamelCase__ )
elif text is not None:
A : Union[str, Any] = self.tokenizer(lowerCamelCase__, **lowerCamelCase__ )
else:
A : Optional[int] = None
if audio_target is not None:
A : int = self.feature_extractor(audio_target=lowerCamelCase__, *lowerCamelCase__, sampling_rate=lowerCamelCase__, **lowerCamelCase__ )
A : Optional[int] = targets["""input_values"""]
elif text_target is not None:
A : Optional[Any] = self.tokenizer(lowerCamelCase__, **lowerCamelCase__ )
A : Optional[Any] = targets["""input_ids"""]
else:
A : int = None
if inputs is None:
return targets
if targets is not None:
A : Dict = labels
A : List[Any] = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
A : int = decoder_attention_mask
return inputs
def _lowerCAmelCase ( self, *lowerCamelCase__, **lowerCamelCase__ ):
A : str = kwargs.pop("""input_values""", lowerCamelCase__ )
A : str = kwargs.pop("""input_ids""", lowerCamelCase__ )
A : Tuple = kwargs.pop("""labels""", lowerCamelCase__ )
if input_values is not None and input_ids is not None:
raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"""You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )
if input_values is not None:
A : Dict = self.feature_extractor.pad(lowerCamelCase__, *lowerCamelCase__, **lowerCamelCase__ )
elif input_ids is not None:
A : Dict = self.tokenizer.pad(lowerCamelCase__, **lowerCamelCase__ )
else:
A : Tuple = None
if labels is not None:
if "input_ids" in labels or (isinstance(lowerCamelCase__, lowerCamelCase__ ) and "input_ids" in labels[0]):
A : str = self.tokenizer.pad(lowerCamelCase__, **lowerCamelCase__ )
A : Dict = targets["""input_ids"""]
else:
A : List[str] = self.feature_extractor.feature_size
A : int = self.feature_extractor.num_mel_bins
A : int = self.feature_extractor.pad(lowerCamelCase__, *lowerCamelCase__, **lowerCamelCase__ )
A : Any = feature_size_hack
A : Optional[int] = targets["""input_values"""]
else:
A : Dict = None
if inputs is None:
return targets
if targets is not None:
A : Any = labels
A : int = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
A : str = decoder_attention_mask
return inputs
def _lowerCAmelCase ( self, *lowerCamelCase__, **lowerCamelCase__ ):
return self.tokenizer.batch_decode(*lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, *lowerCamelCase__, **lowerCamelCase__ ):
return self.tokenizer.decode(*lowerCamelCase__, **lowerCamelCase__ )
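# Hypothetical round trip (assuming the original SpeechT5Processor API):
# inputs = processor(text="hello", return_tensors="pt")
# text = processor.batch_decode(inputs["input_ids"])[0]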
| 662 |
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int:
"""simple docstring"""
A , A : str = 1, 1
A : List[Any] = []
for i in range(1 , n + 1 ):
A : Optional[int] = prev_numerator + 2 * prev_denominator
A : Any = prev_numerator + prev_denominator
if len(str(_lowerCAmelCase ) ) > len(str(_lowerCAmelCase ) ):
result.append(_lowerCAmelCase )
A : int = numerator
A : int = denominator
return len(_lowerCAmelCase )
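# The loop walks the continued-fraction convergents of sqrt(2): 3/2, 7/5, 17/12,
# 41/29, ...; the first convergent counted is 1393/985, whose numerator has one
# more digit than its denominator.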
if __name__ == "__main__":
print(F"""{solution() = }""")
| 662 | 1 |
import re
from filelock import FileLock
try:
import nltk
SCREAMING_SNAKE_CASE_:Optional[Any] = True
except (ImportError, ModuleNotFoundError):
SCREAMING_SNAKE_CASE_:int = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
re.sub("""<n>""" , """""" , _lowerCAmelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_lowerCAmelCase ) )
| 662 |
import re
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
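# e.g. "ATGC" -> "TACG" (A<->T, C<->G complement)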
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 1 |
import warnings
from .generation import TFGenerationMixin
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
warnings.warn(
"Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
"be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , SCREAMING_SNAKE_CASE__ , )
| 662 |
from __future__ import annotations
SCREAMING_SNAKE_CASE_:Tuple = """#"""
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self ):
A : dict = {}
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = self._trie
for char in text:
if char not in trie:
A : str = {}
A : str = trie[char]
A : Optional[int] = True
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Dict = self._trie
for char in prefix:
if char in trie:
A : Optional[Any] = trie[char]
else:
return []
return self._elements(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : int = []
for c, v in d.items():
A : List[Any] = [""" """] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )]
result.extend(lowerCamelCase__ )
return tuple(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_:Any = Trie()
SCREAMING_SNAKE_CASE_:Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def __UpperCamelCase ( _lowerCAmelCase ) -> tuple:
"""simple docstring"""
A : List[str] = trie.find_word(_lowerCAmelCase )
return tuple(string + word for word in suffixes )
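# e.g. autocomplete_using_trie("de") -> roughly ("depart ", "detergent ", "deer ", "deal ");
# ordering follows trie insertion order, and END markers add the trailing space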
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 662 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
A : int = """huggingface/label-files"""
A : Optional[int] = """imagenet-1k-id2label.json"""
A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
A : Any = {int(k ): v for k, v in idalabel.items()}
A : Any = {v: k for k, v in idalabel.items()}
A : Dict = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
A : str = BitConfig(
conv_layer=_lowerCAmelCase , num_labels=1000 , idalabel=_lowerCAmelCase , labelaid=_lowerCAmelCase , )
return config
def __UpperCamelCase ( _lowerCAmelCase ) -> Tuple:
"""simple docstring"""
if "stem.conv" in name:
A : Tuple = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
A : Optional[int] = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
A : Optional[int] = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
A : Tuple = """bit.""" + name
if "bit" not in name and "classifier" not in name:
A : Dict = """bit.encoder.""" + name
return name
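# e.g. "stem.conv.weight" -> "bit.embedder.convolution.weight",
#      "head.fc.bias"     -> "classifier.1.bias"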
def __UpperCamelCase ( ) -> str:
"""simple docstring"""
A : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A : Union[str, Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = get_config(_lowerCAmelCase )
# load original model from timm
A : Dict = create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model
A : Optional[int] = timm_model.state_dict()
for key in state_dict.copy().keys():
A : List[str] = state_dict.pop(_lowerCAmelCase )
A : List[Any] = val.squeeze() if """head""" in key else val
# load HuggingFace model
A : List[Any] = BitForImageClassification(_lowerCAmelCase )
model.eval()
model.load_state_dict(_lowerCAmelCase )
# create image processor
A : Union[str, Any] = create_transform(**resolve_data_config({} , model=_lowerCAmelCase ) )
A : List[Any] = transform.transforms
A : List[Any] = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
A : Tuple = BitImageProcessor(
do_resize=_lowerCAmelCase , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowerCAmelCase , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=_lowerCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
A : Optional[Any] = prepare_img()
A : Optional[Any] = transform(_lowerCAmelCase ).unsqueeze(0 )
A : Optional[Any] = processor(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase )
# verify logits
with torch.no_grad():
A : List[str] = model(_lowerCAmelCase )
A : Any = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
A : Union[str, Any] = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
SCREAMING_SNAKE_CASE_:int = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 662 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = bnb_quantization_config.load_in_abit
A : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
A : Any = []
# custom device map
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1:
A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A : int = get_keys_to_not_convert(_lowerCAmelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowerCAmelCase )
A : Optional[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A : Dict = []
A : Tuple = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowerCAmelCase )
# compatibility with peft
A : Union[str, Any] = load_in_abit
A : Tuple = load_in_abit
A : List[str] = get_parameter_device(_lowerCAmelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
# convert param to the right dtype
A : Tuple = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowerCAmelCase ):
param.to(_lowerCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A : str = replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
A : Optional[Any] = get_quantized_model_device_map(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A : Tuple = True
A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase )
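# Hypothetical entry point (assuming accelerate's public names load_and_quantize_model
# and BnbQuantizationConfig, with the original 8-bit keyword):
# model = load_and_quantize_model(
#     empty_model, BnbQuantizationConfig(load_in_8bit=True), weights_location="ckpt/"
# )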
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[int]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A : Optional[int] = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A : Tuple = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A : Any = {}
A : List[str] = special_dtypes
A : Any = no_split_module_classes
A : Union[str, Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A : Tuple = get_balanced_memory(
_lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , )
A : int = max_memory
A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# check if don't have any quantized module on the cpu
A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A : Optional[int] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
A : Optional[Any] = []
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> int:
"""simple docstring"""
A : Optional[int] = False
for name, module in model.named_children():
if current_key_name is None:
A : int = []
current_key_name.append(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A : Dict = """.""".join(_lowerCAmelCase )
A : Optional[Any] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A : Dict = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A : Optional[Any] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
A : Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A : Any = module.weight.data
if module.bias is not None:
A : Any = module.bias.data
bnb_module.requires_grad_(_lowerCAmelCase )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Dict = True
if len(list(module.children() ) ) > 0:
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __UpperCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
with init_empty_weights():
A : Tuple = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
A : Optional[int] = find_tied_parameters(_lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A : Optional[int] = sum(_lowerCAmelCase , [] )
A : Tuple = len(_lowerCAmelCase ) > 0
# Check if it is a base model
A : List[str] = False
if hasattr(_lowerCAmelCase , """base_model_prefix""" ):
A : Optional[Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A : str = list(model.named_children() )
A : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
A : int = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
A : Optional[Any] = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase )
# remove ".weight" from the keys
A : Union[str, Any] = [""".weight""", """.bias"""]
A : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A : List[str] = name.replace(_lowerCAmelCase , """""" )
filtered_module_names.append(_lowerCAmelCase )
return filtered_module_names
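# For a typical causal LM this returns something like ["lm_head"], keeping the
# output head in full precision for numerical stability.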
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
for m in model.modules():
if isinstance(_lowerCAmelCase , bnb.nn.Linearabit ):
return True
return False
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
return next(parameter.parameters() ).device
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase )
A : Tuple = param_name
A : Union[str, Any] = model
if "." in tensor_name:
A : int = tensor_name.split(""".""" )
for split in splits[:-1]:
A : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
A : Optional[Any] = new_module
A : List[str] = splits[-1]
# offload weights
A : Optional[int] = False
offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase , )
else:
offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
offload_weight(_lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase )
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , """meta""" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
| 662 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
A : Tuple = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
A : Dict = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(_lowerCAmelCase )
DownloadCommand.register_subcommand(_lowerCAmelCase )
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
RunCommand.register_subcommand(_lowerCAmelCase )
ServeCommand.register_subcommand(_lowerCAmelCase )
UserCommands.register_subcommand(_lowerCAmelCase )
AddNewModelCommand.register_subcommand(_lowerCAmelCase )
AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase )
LfsCommands.register_subcommand(_lowerCAmelCase )
PTtoTFCommand.register_subcommand(_lowerCAmelCase )
# Let's go
A : Tuple = parser.parse_args()
if not hasattr(_lowerCAmelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
A : Any = args.func(_lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 662 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
A : Tuple = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
A : Dict = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(_lowerCAmelCase )
DownloadCommand.register_subcommand(_lowerCAmelCase )
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
RunCommand.register_subcommand(_lowerCAmelCase )
ServeCommand.register_subcommand(_lowerCAmelCase )
UserCommands.register_subcommand(_lowerCAmelCase )
AddNewModelCommand.register_subcommand(_lowerCAmelCase )
AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase )
LfsCommands.register_subcommand(_lowerCAmelCase )
PTtoTFCommand.register_subcommand(_lowerCAmelCase )
# Let's go
A : Tuple = parser.parse_args()
if not hasattr(_lowerCAmelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
A : Any = args.func(_lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 662 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase__, """tf_padding""" ) )
self.parent.assertTrue(hasattr(lowerCamelCase__, """depth_multiplier""" ) )
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=3, lowerCamelCase__=32, lowerCamelCase__=0.25, lowerCamelCase__=8, lowerCamelCase__=True, lowerCamelCase__=1024, lowerCamelCase__=32, lowerCamelCase__="relu6", lowerCamelCase__=0.1, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=10, lowerCamelCase__=None, ):
A : Optional[Any] = parent
A : List[str] = batch_size
A : Optional[Any] = num_channels
A : str = image_size
A : List[str] = depth_multiplier
A : List[Any] = min_depth
A : List[str] = tf_padding
A : List[str] = int(last_hidden_size * depth_multiplier )
A : List[Any] = output_stride
A : Optional[Any] = hidden_act
A : Union[str, Any] = classifier_dropout_prob
A : str = use_labels
A : Optional[int] = is_training
A : str = num_labels
A : Optional[int] = initializer_range
A : Dict = scope
def _lowerCAmelCase ( self ):
A : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : str = None
A : Optional[Any] = None
if self.use_labels:
A : Union[str, Any] = ids_tensor([self.batch_size], self.num_labels )
A : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
A : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowerCAmelCase ( self ):
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Any = MobileNetVaModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Tuple = model(lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : str = self.num_labels
A : Any = MobileNetVaForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Union[str, Any] = model(lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self ):
A : List[str] = self.prepare_config_and_inputs()
A , A , A , A : Any = config_and_inputs
A : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
__lowerCamelCase : Dict = (
{"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
__lowerCamelCase : Tuple = False
__lowerCamelCase : str = False
__lowerCamelCase : List[Any] = False
__lowerCamelCase : Any = False
def _lowerCAmelCase ( self ):
A : int = MobileNetVaModelTester(self )
A : Optional[int] = MobileNetVaConfigTester(self, config_class=lowerCamelCase__, has_text_modality=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : List[Any] = model_class(lowerCamelCase__ )
A : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Dict = [*signature.parameters.keys()]
A : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
def check_hidden_states_output(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : List[Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
A : Optional[int] = model(**self._prepare_for_class(lowerCamelCase__, lowerCamelCase__ ) )
A : int = outputs.hidden_states
A : List[str] = 26
self.assertEqual(len(lowerCamelCase__ ), lowerCamelCase__ )
A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Dict = True
check_hidden_states_output(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Optional[Any] = True
check_hidden_states_output(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Dict = MobileNetVaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def _lowerCAmelCase ( self ):
A : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(lowerCamelCase__ )
A : Dict = self.default_image_processor
A : List[str] = prepare_img()
A : Tuple = image_processor(images=lowerCamelCase__, return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
A : str = model(**lowerCamelCase__ )
# verify the logits
A : Optional[Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape, lowerCamelCase__ )
A : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase__, atol=1e-4 ) )
| 662 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_:int = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Any = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 662 | 1 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __UpperCamelCase ( ) -> tuple[list[int], int]:
"""simple docstring"""
A : List[str] = [randint(-1000 , 1000 ) for i in range(10 )]
A : Dict = randint(-5000 , 5000 )
return (arr, r)
SCREAMING_SNAKE_CASE_:Tuple = make_dataset()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[int, ...]:
"""simple docstring"""
for triplet in permutations(_lowerCAmelCase , 3 ):
if sum(_lowerCAmelCase ) == target:
return tuple(sorted(_lowerCAmelCase ) )
return (0, 0, 0)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[int, int, int]:
"""simple docstring"""
arr.sort()
A : Any = len(_lowerCAmelCase )
for i in range(n - 1 ):
A , A : str = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __UpperCamelCase ( ) -> tuple[float, float]:
"""simple docstring"""
A : Dict = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
A : Optional[Any] = """
triplet_sum1(*dataset)
"""
A : Optional[Any] = """
triplet_sum2(*dataset)
"""
A : List[str] = repeat(setup=_lowerCAmelCase , stmt=_lowerCAmelCase , repeat=5 , number=1_0000 )
A : Any = repeat(setup=_lowerCAmelCase , stmt=_lowerCAmelCase , repeat=5 , number=1_0000 )
return (min(_lowerCAmelCase ), min(_lowerCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE_:Union[str, Any] = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
| 662 |
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list[int]:
"""simple docstring"""
A : Optional[int] = int(_lowerCAmelCase )
# Initialize Result
A : int = []
    # Traverse through all denominations
for denomination in reversed(_lowerCAmelCase ):
        # Use as many of this denomination as possible
while int(_lowerCAmelCase ) >= int(_lowerCAmelCase ):
total_value -= int(_lowerCAmelCase )
            answer.append(_lowerCAmelCase ) # append this denomination to the answer
return answer
# Driver Code
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:List[Any] = []
SCREAMING_SNAKE_CASE_:Dict = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
SCREAMING_SNAKE_CASE_:Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F"""Following is minimal change for {value}: """)
SCREAMING_SNAKE_CASE_:str = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
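# A minimal runnable sketch of the same greedy change-making strategy with
# descriptive names (the names below are illustrative, not from the original):
def find_minimum_change_example(denominations: list[int], value: int) -> list[int]:
    total_value = int(value)
    answer = []
    for denomination in sorted(denominations, reverse=True):
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)
    return answer
# e.g. find_minimum_change_example([1, 2, 5, 10, 20, 50, 100, 500, 2_000], 987)
# -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]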
| 662 | 1 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
SCREAMING_SNAKE_CASE_:List[Any] = object()
# For specifying empty leaf dict `{}`
SCREAMING_SNAKE_CASE_:Optional[Any] = object()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
"""simple docstring"""
A : Any = tuple((re.compile(x + """$""" ) for x in qs) )
for i in range(len(_lowerCAmelCase ) - len(_lowerCAmelCase ) + 1 ):
A : Union[str, Any] = [x.match(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , ks[i:] )]
if matches and all(_lowerCAmelCase ):
return True
return False
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
def replace(_lowerCAmelCase , _lowerCAmelCase ):
for rule, replacement in rules:
if _match(_lowerCAmelCase , _lowerCAmelCase ):
return replacement
return val
return replace
def __UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""" , _lowerCAmelCase )),
(("transformer", "wte", "embedding"), P("""mp""" , _lowerCAmelCase )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(_lowerCAmelCase , """mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""" , _lowerCAmelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(_lowerCAmelCase , """mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""" , _lowerCAmelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def __UpperCamelCase ( _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A : int = _get_partition_rules()
A : List[Any] = _replacement_rules(_lowerCAmelCase )
A : Optional[Any] = {k: _unmatched for k in flatten_dict(_lowerCAmelCase )}
A : Dict = {k: replace(_lowerCAmelCase , _lowerCAmelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(_lowerCAmelCase ) )
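# A self-contained sketch of the windowed regex matching done by the first
# helper above: a tuple of patterns `qs` matches a key tuple `ks` when it lines
# up with some contiguous window of `ks` (names are illustrative; `re` is
# already imported at the top of this file):
def match_example(qs: tuple[str, ...], ks: tuple[str, ...]) -> bool:
    patterns = tuple(re.compile(q + "$") for q in qs)
    for i in range(len(ks) - len(qs) + 1):
        # zip truncates ks[i:] to len(qs), i.e. the window starting at i
        if qs and all(p.match(k) for p, k in zip(patterns, ks[i:])):
            return True
    return False
# e.g. match_example(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel")) -> True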
| 662 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare this to taking a full-size model, reducing its layers and
# emb dimensions to the minimum, but keeping the full vocab + merges files, which leads to ~3MB
# in total for all files. The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE_:Any = dict(zip(vocab, range(len(vocab))))
SCREAMING_SNAKE_CASE_:Dict = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_:List[Any] = Path(tmpdirname)
SCREAMING_SNAKE_CASE_:str = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
SCREAMING_SNAKE_CASE_:Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
SCREAMING_SNAKE_CASE_:Any = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
SCREAMING_SNAKE_CASE_:Optional[Any] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
SCREAMING_SNAKE_CASE_:Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
SCREAMING_SNAKE_CASE_:str = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
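# Loading the uploaded checkpoint later works like any other model (a sketch;
# the repo id matches the upload command above):
# tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
# model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")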
| 662 | 1 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
SCREAMING_SNAKE_CASE_:int = """src/transformers"""
SCREAMING_SNAKE_CASE_:Union[str, Any] = """docs/source/en/tasks"""
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A : int = f.readlines()
# Find the start prompt.
A : Any = 0
while not lines[start_index].startswith(_lowerCAmelCase ):
start_index += 1
start_index += 1
A : Optional[int] = start_index
while not lines[end_index].startswith(_lowerCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE_:Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH)
SCREAMING_SNAKE_CASE_:Optional[Any] = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SCREAMING_SNAKE_CASE_:Dict = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def __UpperCamelCase ( _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : List[str] = TASK_GUIDE_TO_MODELS[task_guide]
A : int = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_lowerCAmelCase , set() )
A : Any = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False ) -> Tuple:
"""simple docstring"""
A , A , A , A : Tuple = _find_text_in_file(
filename=os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , )
A : int = get_model_list_for_task(_lowerCAmelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
""" to fix this.""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Tuple = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
SCREAMING_SNAKE_CASE_:Union[str, Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
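# Example invocation from the repo root (flag name as defined above):
# python utils/check_task_guides.py                        # check only
# python utils/check_task_guides.py --fix_and_overwrite    # rewrite the guides in place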
| 662 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:int = """Hello, World!"""
SCREAMING_SNAKE_CASE_:List[Any] = """en_XX"""
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Optional[int] = Path("""data_bin""" )
A : Optional[Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowerCAmelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(_lowerCAmelCase )
A : Any = xmod.model.encoder.sentence_encoder
A : Optional[int] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _lowerCAmelCase )
A : int = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
A : Any = xmod_sent_encoder.embed_tokens.weight
A : int = xmod_sent_encoder.embed_positions.weight
A : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
A : Dict = xmod_sent_encoder.layernorm_embedding.weight
A : int = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A : str = model.roberta.encoder.layer[i]
A : Tuple = xmod_sent_encoder.layers[i]
# self attention
A : Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
A : List[str] = xmod_layer.self_attn.q_proj.weight
A : Optional[int] = xmod_layer.self_attn.q_proj.bias
A : List[Any] = xmod_layer.self_attn.k_proj.weight
A : Union[str, Any] = xmod_layer.self_attn.k_proj.bias
A : Optional[int] = xmod_layer.self_attn.v_proj.weight
A : Dict = xmod_layer.self_attn.v_proj.bias
# self-attention output
A : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
A : Optional[Any] = xmod_layer.self_attn.out_proj.weight
A : Dict = xmod_layer.self_attn.out_proj.bias
A : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight
A : str = xmod_layer.self_attn_layer_norm.bias
# intermediate
A : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
A : Optional[int] = xmod_layer.fca.weight
A : Optional[int] = xmod_layer.fca.bias
# output
A : Dict = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
A : Union[str, Any] = xmod_layer.fca.weight
A : int = xmod_layer.fca.bias
A : List[str] = xmod_layer.final_layer_norm.weight
A : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
A : str = xmod_layer.adapter_layer_norm.weight
A : str = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
A : Optional[int] = bert_output.adapter_modules[lang_code]
A : int = xmod_layer.adapter_modules[lang_code]
A : Optional[Any] = from_adapter.fca.weight
A : Optional[Any] = from_adapter.fca.bias
A : List[str] = from_adapter.fca.weight
A : Any = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A : Dict = xmod_sent_encoder.layer_norm.weight
A : int = xmod_sent_encoder.layer_norm.bias
if classification_head:
A : int = xmod.model.classification_heads["""mnli"""].dense.weight
A : Optional[Any] = xmod.model.classification_heads["""mnli"""].dense.bias
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
A : Any = xmod.model.encoder.lm_head.dense.weight
A : Tuple = xmod.model.encoder.lm_head.dense.bias
A : Any = xmod.model.encoder.lm_head.layer_norm.weight
A : List[str] = xmod.model.encoder.lm_head.layer_norm.bias
A : Union[str, Any] = xmod.model.encoder.lm_head.weight
A : Tuple = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
A : Optional[int] = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowerCAmelCase )
A : List[str] = model(_lowerCAmelCase )[0]
if classification_head:
A : Dict = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowerCAmelCase ) )
else:
A : Optional[Any] = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
A : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
A : Optional[Any] = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
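# Example invocation (the script filename and all paths below are placeholders):
# python convert_xmod_checkpoint.py \
#     --xmod_checkpoint_path /path/to/checkpoint.pt \
#     --pytorch_dump_folder_path /path/to/output \
#     --classification_head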
| 662 | 1 |
from __future__ import annotations
from collections import deque
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__ ):
A : list[dict] = []
self.adlist.append(
{"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
for keyword in keywords:
self.add_keyword(lowerCamelCase__ )
self.set_fail_transitions()
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Union[str, Any] = 0
for character in keyword:
A : Dict = self.find_next_state(lowerCamelCase__, lowerCamelCase__ )
if next_state is None:
self.adlist.append(
{
"""value""": character,
"""next_states""": [],
"""fail_state""": 0,
"""output""": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
A : Union[str, Any] = len(self.adlist ) - 1
else:
A : Union[str, Any] = next_state
self.adlist[current_state]["output"].append(lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCamelCase__ )
A : Optional[Any] = 0
while q:
A : int = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCamelCase__ )
A : List[str] = self.adlist[r]["""fail_state"""]
while (
self.find_next_state(lowerCamelCase__, self.adlist[child]["""value"""] ) is None
and state != 0
):
A : Optional[int] = self.adlist[state]["""fail_state"""]
A : Any = self.find_next_state(
lowerCamelCase__, self.adlist[child]["""value"""] )
if self.adlist[child]["fail_state"] is None:
A : Optional[Any] = 0
A : List[Any] = (
self.adlist[child]["""output"""]
+ self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
)
def _lowerCAmelCase ( self, lowerCamelCase__ ):
        A : dict = {} # returns a dict mapping each matched keyword to the list of its occurrences
A : Tuple = 0
for i in range(len(lowerCamelCase__ ) ):
while (
self.find_next_state(lowerCamelCase__, string[i] ) is None
and current_state != 0
):
A : int = self.adlist[current_state]["""fail_state"""]
A : Union[str, Any] = self.find_next_state(lowerCamelCase__, string[i] )
if next_state is None:
A : str = 0
else:
A : Optional[Any] = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
A : Optional[Any] = []
result[key].append(i - len(lowerCamelCase__ ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
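# Usage sketch for the Aho-Corasick automaton above, assuming the de-obfuscated
# names `Automaton` and `search_in` from the original implementation:
# automaton = Automaton(["what", "hat", "ver", "er"])
# automaton.search_in("whatever, err ... , wherever")
# -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}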
| 662 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Any = tempfile.mkdtemp()
A : List[str] = BlipImageProcessor()
A : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
A : str = BlipProcessor(lowerCamelCase__, lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).tokenizer
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).image_processor
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
A : Any = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
A : int = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Any = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
A : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase__, padding_value=1.0 )
A : Dict = BlipProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : str = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : int = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_image_processor()
A : int = self.get_tokenizer()
A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = """lower newer"""
A : List[Any] = processor(text=lowerCamelCase__ )
A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : Union[str, Any] = self.prepare_image_inputs()
A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Optional[int] = processor.batch_decode(lowerCamelCase__ )
A : Dict = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : int = self.get_tokenizer()
A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : List[str] = self.prepare_image_inputs()
A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 662 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:int = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = "gpt_neox"
def __init__( self, lowerCamelCase__=5_0432, lowerCamelCase__=6144, lowerCamelCase__=44, lowerCamelCase__=64, lowerCamelCase__=2_4576, lowerCamelCase__="gelu", lowerCamelCase__=0.25, lowerCamelCase__=1_0000, lowerCamelCase__=0.0, lowerCamelCase__=0.0, lowerCamelCase__=0.1, lowerCamelCase__=2048, lowerCamelCase__=0.02, lowerCamelCase__=1e-5, lowerCamelCase__=True, lowerCamelCase__=0, lowerCamelCase__=2, lowerCamelCase__=False, lowerCamelCase__=True, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(bos_token_id=lowerCamelCase__, eos_token_id=lowerCamelCase__, **lowerCamelCase__ )
A : List[str] = vocab_size
A : Tuple = max_position_embeddings
A : Optional[Any] = hidden_size
A : Tuple = num_hidden_layers
A : Tuple = num_attention_heads
A : List[Any] = intermediate_size
A : int = hidden_act
A : Tuple = rotary_pct
A : int = rotary_emb_base
A : int = attention_dropout
A : List[str] = hidden_dropout
A : str = classifier_dropout
A : Optional[int] = initializer_range
A : List[Any] = layer_norm_eps
A : List[Any] = use_cache
A : Any = tie_word_embeddings
A : Tuple = use_parallel_residual
A : Union[str, Any] = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )
def _lowerCAmelCase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, lowerCamelCase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'''got {self.rope_scaling}''' )
A : Dict = self.rope_scaling.get("""type""", lowerCamelCase__ )
A : List[str] = self.rope_scaling.get("""factor""", lowerCamelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(lowerCamelCase__, lowerCamelCase__ ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
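# For reference, a value that passes the validation above (a sketch, assuming
# the de-obfuscated class name from the original file):
# config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})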
| 662 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
return f'''gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase__ ) for s in shape] )}.npy'''
def _lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 4, 64, 64), lowerCamelCase__=False ):
A : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return image
def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__="CompVis/stable-diffusion-v1-4" ):
A : str = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = """bf16""" if fpaa else None
A , A : str = FlaxUNetaDConditionModel.from_pretrained(
lowerCamelCase__, subfolder="""unet""", dtype=lowerCamelCase__, revision=lowerCamelCase__ )
return model, params
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 77, 768), lowerCamelCase__=False ):
A : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
A : List[str] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : List[str] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=lowerCamelCase__ )
A : str = self.get_latents(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : int = self.get_encoder_hidden_states(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : Optional[Any] = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : Dict = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : Tuple = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=lowerCamelCase__ )
A : int = self.get_latents(lowerCamelCase__, shape=(4, 4, 96, 96), fpaa=lowerCamelCase__ )
A : Union[str, Any] = self.get_encoder_hidden_states(lowerCamelCase__, shape=(4, 77, 1024), fpaa=lowerCamelCase__ )
A : Dict = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : List[Any] = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
| 662 | 1 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=3, lowerCamelCase__=32, lowerCamelCase__=3, lowerCamelCase__=10, lowerCamelCase__=[10, 20, 30, 40], lowerCamelCase__=[1, 1, 2, 1], lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__="relu", lowerCamelCase__=3, lowerCamelCase__=None, ):
A : int = parent
A : str = batch_size
A : List[str] = image_size
A : Dict = num_channels
A : Optional[int] = embeddings_size
A : Optional[int] = hidden_sizes
A : List[str] = depths
A : List[Any] = is_training
A : int = use_labels
A : str = hidden_act
A : List[Any] = num_labels
A : Union[str, Any] = scope
A : Optional[int] = len(lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Union[str, Any] = None
if self.use_labels:
A : Tuple = ids_tensor([self.batch_size], self.num_labels )
A : List[str] = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self ):
return RegNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Tuple = RegNetModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Tuple = model(lowerCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Dict = self.num_labels
A : Dict = RegNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Optional[Any] = model(lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self ):
A : str = self.prepare_config_and_inputs()
A , A , A : Tuple = config_and_inputs
A : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : str = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
__lowerCamelCase : Optional[int] = (
{"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
if is_torch_available()
else {}
)
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Optional[int] = False
__lowerCamelCase : Any = False
__lowerCamelCase : str = False
def _lowerCAmelCase ( self ):
A : List[str] = RegNetModelTester(self )
A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__, has_text_modality=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCAmelCase ( self ):
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : List[Any] = model_class(lowerCamelCase__ )
A : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : List[str] = [*signature.parameters.keys()]
A : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A , A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : int = model_class(config=lowerCamelCase__ )
for name, module in model.named_modules():
if isinstance(lowerCamelCase__, (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ), msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
self.assertTrue(
torch.all(module.bias == 0 ), msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
def _lowerCAmelCase ( self ):
def check_hidden_states_output(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : int = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
A : Any = model(**self._prepare_for_class(lowerCamelCase__, lowerCamelCase__ ) )
A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : str = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ), expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )
A , A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A : List[str] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : List[Any] = layer_type
A : Tuple = True
check_hidden_states_output(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Optional[Any] = True
check_hidden_states_output(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Union[str, Any] = RegNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __UpperCamelCase ( ) -> str:
"""simple docstring"""
A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowerCAmelCase ( self ):
A : Dict = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ )
A : Union[str, Any] = self.default_image_processor
A : List[Any] = prepare_img()
A : Union[str, Any] = image_processor(images=lowerCamelCase__, return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
A : List[str] = model(**lowerCamelCase__ )
# verify the logits
A : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, lowerCamelCase__ )
A : Union[str, Any] = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase__, atol=1e-4 ) )
| 662 |
from typing import Any
import numpy as np
def __UpperCamelCase ( _lowerCAmelCase ) -> bool:
"""simple docstring"""
return np.array_equal(_lowerCAmelCase , matrix.conjugate().T )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Any = v.conjugate().T
A : List[Any] = v_star.dot(_lowerCAmelCase )
assert isinstance(_lowerCAmelCase , np.ndarray )
return (v_star_dot.dot(_lowerCAmelCase )) / (v_star.dot(_lowerCAmelCase ))
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
A : Any = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
A : str = np.array([[1], [2], [3]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) )
A : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
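# A self-contained sketch of the Rayleigh quotient R(M, v) = (v* M v) / (v* v)
# computed by the helpers above (the name below is illustrative):
def rayleigh_quotient_example(m: np.ndarray, v: np.ndarray) -> np.ndarray:
    v_star = v.conjugate().T
    return (v_star @ m @ v) / (v_star @ v)
# For the second Hermitian matrix above and v = [[1], [2], [3]]:
# Mv = [17, 5, 5], so R = (17 + 10 + 15) / (1 + 4 + 9) = 42 / 14 = 3.0.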
| 662 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=7, lowerCamelCase__=3, lowerCamelCase__=30, lowerCamelCase__=400, lowerCamelCase__=True, lowerCamelCase__=None, lowerCamelCase__=True, lowerCamelCase__=[0.5, 0.5, 0.5], lowerCamelCase__=[0.5, 0.5, 0.5], lowerCamelCase__=True, lowerCamelCase__=1 / 255, lowerCamelCase__=True, ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
A : Any = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
A : Dict = parent
A : int = batch_size
A : Optional[int] = num_channels
A : Tuple = min_resolution
A : Any = max_resolution
A : Dict = do_resize
A : List[str] = size
A : List[Any] = do_normalize
A : Optional[Any] = image_mean
A : List[str] = image_std
A : Union[str, Any] = do_rescale
A : Dict = rescale_factor
A : Optional[int] = do_pad
def _lowerCAmelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=False ):
if not batched:
A : List[str] = image_inputs[0]
if isinstance(lowerCamelCase__, Image.Image ):
A , A : Any = image.size
else:
A , A : str = image.shape[1], image.shape[2]
if w < h:
A : Optional[Any] = int(self.size["""shortest_edge"""] * h / w )
A : Dict = self.size["""shortest_edge"""]
elif w > h:
A : Optional[Any] = self.size["""shortest_edge"""]
A : Dict = int(self.size["""shortest_edge"""] * w / h )
else:
A : Optional[int] = self.size["""shortest_edge"""]
A : Optional[Any] = self.size["""shortest_edge"""]
else:
A : Union[str, Any] = []
for image in image_inputs:
A , A : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A : Optional[Any] = max(lowerCamelCase__, key=lambda lowerCamelCase__ : item[0] )[0]
A : str = max(lowerCamelCase__, key=lambda lowerCamelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : str = DetaImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self ):
A : Dict = DetaImageProcessingTester(self )
@property
def _lowerCAmelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__, """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """do_rescale""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """do_pad""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """size""" ) )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
# Initialize image_processing
A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Dict = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__, Image.Image )
# Test not batched input
A : int = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
A , A : Dict = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
A , A : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase__, batched=lowerCamelCase__ )
A : Union[str, Any] = image_processing(lowerCamelCase__, return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def _lowerCAmelCase ( self ):
# Initialize image_processing
A : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : Dict = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase__, numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__, np.ndarray )
# Test not batched input
A : str = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
A , A : List[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
A : Dict = image_processing(lowerCamelCase__, return_tensors="""pt""" ).pixel_values
A , A : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase__, batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def _lowerCAmelCase ( self ):
# Initialize image_processing
A : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase__, torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__, torch.Tensor )
# Test not batched input
A : Dict = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
A , A : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
A : Any = image_processing(lowerCamelCase__, return_tensors="""pt""" ).pixel_values
A , A : Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCamelCase__, batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
@slow
def _lowerCAmelCase ( self ):
# prepare image and target
A : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""", """r""" ) as f:
A : int = json.loads(f.read() )
A : Tuple = {"""image_id""": 3_9769, """annotations""": target}
# encode them
A : Dict = DetaImageProcessor()
A : str = image_processing(images=lowerCamelCase__, annotations=lowerCamelCase__, return_tensors="""pt""" )
# verify pixel values
A : int = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape, lowerCamelCase__ )
A : Optional[Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3], lowerCamelCase__, atol=1e-4 ) )
# verify area
A : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""], lowerCamelCase__ ) )
# verify boxes
A : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape, lowerCamelCase__ )
A : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0], lowerCamelCase__, atol=1e-3 ) )
# verify image_id
A : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""], lowerCamelCase__ ) )
# verify is_crowd
A : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""], lowerCamelCase__ ) )
# verify class_labels
A : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""], lowerCamelCase__ ) )
# verify orig_size
A : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""], lowerCamelCase__ ) )
# verify size
A : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""], lowerCamelCase__ ) )
@slow
def _lowerCAmelCase ( self ):
# prepare image, target and masks_path
A : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""", """r""" ) as f:
A : List[Any] = json.loads(f.read() )
A : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
A : Optional[int] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A : str = DetaImageProcessor(format="""coco_panoptic""" )
A : Optional[int] = image_processing(images=lowerCamelCase__, annotations=lowerCamelCase__, masks_path=lowerCamelCase__, return_tensors="""pt""" )
# verify pixel values
A : int = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape, lowerCamelCase__ )
A : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3], lowerCamelCase__, atol=1e-4 ) )
# verify area
A : Optional[Any] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""], lowerCamelCase__ ) )
# verify boxes
A : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape, lowerCamelCase__ )
A : List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0], lowerCamelCase__, atol=1e-3 ) )
# verify image_id
A : Optional[int] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""], lowerCamelCase__ ) )
# verify is_crowd
A : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""], lowerCamelCase__ ) )
# verify class_labels
A : Dict = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""], lowerCamelCase__ ) )
# verify masks
A : Dict = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item(), lowerCamelCase__ )
# verify orig_size
A : List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""], lowerCamelCase__ ) )
# verify size
A : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""], lowerCamelCase__ ) )
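# Side note (illustrative, hypothetical numbers): the box targets verified
# above appear to be normalised (center_x, center_y, width, height) in DETR
# style; corner pixels would then be recovered by scaling with the processed
# 800x1066 size, e.g. for a hypothetical box centred at x=0.5 with width 0.2:
assert round((0.5 - 0.2 / 2) * 1066, 1 ) == 426.4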
| 662 |
from __future__ import annotations
import numpy as np
def __UpperCamelCase ( table: np.ndarray ) -> tuple[np.ndarray, np.ndarray]:
    """simple docstring"""
    rows , columns = np.shape(table )
    if rows != columns:
        msg = (
            """'table' has to be of square shaped array but got a """
            f'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        # Doolittle scheme: the lower factor keeps a unit diagonal.
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
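    # Hedged example (not in the original module): check on an arbitrary
    # 2x2 input that the two factors reproduce it.
    example = np.array([[2.0, -2.0], [-1.0, 7.0]] )
    lower_part , upper_part = __UpperCamelCase(example )
    assert np.allclose(lower_part @ upper_part, example )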
| 662 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[str] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE_:Union[str, Any] = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
SCREAMING_SNAKE_CASE_:Tuple = {"""mobilebert-uncased""": 512}
SCREAMING_SNAKE_CASE_:Optional[int] = {}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Dict = VOCAB_FILES_NAMES
__lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Any = PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : List[Any] = MobileBertTokenizer
def __init__( self, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=True, lowerCamelCase__="[UNK]", lowerCamelCase__="[SEP]", lowerCamelCase__="[PAD]", lowerCamelCase__="[CLS]", lowerCamelCase__="[MASK]", lowerCamelCase__=True, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(
lowerCamelCase__, tokenizer_file=lowerCamelCase__, do_lower_case=lowerCamelCase__, unk_token=lowerCamelCase__, sep_token=lowerCamelCase__, pad_token=lowerCamelCase__, cls_token=lowerCamelCase__, mask_token=lowerCamelCase__, tokenize_chinese_chars=lowerCamelCase__, strip_accents=lowerCamelCase__, **lowerCamelCase__, )
A : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""", lowerCamelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""", lowerCamelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""", lowerCamelCase__ ) != tokenize_chinese_chars
):
A : int = getattr(lowerCamelCase__, normalizer_state.pop("""type""" ) )
A : Dict = do_lower_case
A : Dict = strip_accents
A : Union[str, Any] = tokenize_chinese_chars
A : Union[str, Any] = normalizer_class(**lowerCamelCase__ )
A : str = do_lower_case
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=None ):
A : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Union[str, Any] = [self.sep_token_id]
A : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : List[str] = self._tokenizer.model.save(lowerCamelCase__, name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
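# Hedged sketch (not part of the class above): the segment-id arithmetic of
# create_token_type_ids_from_sequences, checked on hypothetical ids without
# loading a pretrained vocab (the cls_id/sep_id values are made up).
def _demo_token_type_ids(token_ids_a, token_ids_b=None, cls_id=101, sep_id=102 ):
    cls , sep = [cls_id], [sep_id]
    if token_ids_b is None:
        return len(cls + token_ids_a + sep ) * [0]
    return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]


assert _demo_token_type_ids([5, 6, 7], [8, 9] ) == [0, 0, 0, 0, 0, 1, 1, 1]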
| 662 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
def get_resize_output_image_size(input_image , output_size , keep_aspect_ratio , multiple ) -> Tuple[int, int]:
    """simple docstring"""

    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        # snap to the nearest multiple, then clamp into [min_val, max_val]
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
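# Worked example (illustrative) for the helper above: a 480x640 input with
# output_size=384, keep_aspect_ratio=True and multiple=32 gives
# scale_height=0.8 and scale_width=0.6; fitting the height (the scale closer
# to 1) and snapping both sides to multiples of 32 yields:
# >>> get_resize_output_image_size(np.zeros((3, 480, 640)), 384, True, 32)
# (384, 512)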
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : int = size if size is not None else {"""height""": 384, """width""": 384}
A : str = get_size_dict(lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Optional[int] = size
A : Union[str, Any] = keep_aspect_ratio
A : int = ensure_multiple_of
A : Dict = resample
A : Optional[Any] = do_rescale
A : Any = rescale_factor
A : str = do_normalize
A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Dict = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
A : Optional[Any] = get_resize_output_image_size(
lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, )
return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A : str = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__ )
A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
A : Tuple = resample if resample is not None else self.resample
A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A : int = do_normalize if do_normalize is not None else self.do_normalize
A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A : Optional[int] = image_std if image_std is not None else self.image_std
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : str = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCamelCase__ ):
A : int = target_sizes.numpy()
A : Union[str, Any] = []
for idx in range(len(lowerCamelCase__ ) ):
A : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ )
A : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
A : List[str] = logits.argmax(dim=1 )
A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
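if is_torch_available():
    # Hedged sketch (illustrative shapes only): the no-target_sizes branch of
    # the post-processing above reduces (batch, num_labels, height, width)
    # logits to per-pixel label maps via argmax over the channel axis.
    _demo_logits = torch.randn(2, 19, 60, 80 )
    assert _demo_logits.argmax(dim=1 ).shape == (2, 60, 80)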
| 662 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
SCREAMING_SNAKE_CASE_:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE_:List[str] = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE_:Any = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
SCREAMING_SNAKE_CASE_:List[str] = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Tuple = VOCAB_FILES_NAMES
__lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase : Dict = ["input_ids", "attention_mask"]
__lowerCamelCase : Dict = DistilBertTokenizer
def __init__( self, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=True, lowerCamelCase__="[UNK]", lowerCamelCase__="[SEP]", lowerCamelCase__="[PAD]", lowerCamelCase__="[CLS]", lowerCamelCase__="[MASK]", lowerCamelCase__=True, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(
lowerCamelCase__, tokenizer_file=lowerCamelCase__, do_lower_case=lowerCamelCase__, unk_token=lowerCamelCase__, sep_token=lowerCamelCase__, pad_token=lowerCamelCase__, cls_token=lowerCamelCase__, mask_token=lowerCamelCase__, tokenize_chinese_chars=lowerCamelCase__, strip_accents=lowerCamelCase__, **lowerCamelCase__, )
A : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""", lowerCamelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""", lowerCamelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""", lowerCamelCase__ ) != tokenize_chinese_chars
):
A : int = getattr(lowerCamelCase__, normalizer_state.pop("""type""" ) )
A : Any = do_lower_case
A : Union[str, Any] = strip_accents
A : List[Any] = tokenize_chinese_chars
A : List[str] = normalizer_class(**lowerCamelCase__ )
A : List[Any] = do_lower_case
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=None ):
A : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Tuple = [self.sep_token_id]
A : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Union[str, Any] = self._tokenizer.model.save(lowerCamelCase__, name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
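# Hedged sketch (not part of the class above): the special-token layout
# produced by build_inputs_with_special_tokens, shown with hypothetical ids;
# a pair of sequences is laid out as [CLS] a [SEP] b [SEP].
def _demo_build_inputs(token_ids_a, token_ids_b=None, cls_id=101, sep_id=102 ):
    output = [cls_id] + token_ids_a + [sep_id]
    if token_ids_b:
        output += token_ids_b + [sep_id]
    return output


assert _demo_build_inputs([7, 8], [9] ) == [101, 7, 8, 102, 9, 102]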
| 662 |
class SubArray:
    '''simple docstring'''

    def __init__( self, arr ):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(""",""" )

    def solve_sub_array( self ):
        # sum_value[i]: best sum of a subarray ending at i;
        # rear[i]: best subarray sum seen anywhere up to i.
        rear = [int(self.array[0] )] * len(self.array )
        sum_value = [int(self.array[0] )] * len(self.array )
        for i in range(1, len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
            rear[i] = max(sum_value[i], rear[i - 1] )
        return rear[len(self.array ) - 1]


if __name__ == "__main__":
    whole_array = input("""please input some numbers:""" )
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("""the results is:""", re))
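# Worked example (illustrative): for the input "1,-2,4,5" the best sums
# ending at each index are [1, -1, 4, 9] and the running maxima are
# [1, 1, 4, 9], so solve_sub_array() returns 9.
assert SubArray("""1,-2,4,5""" ).solve_sub_array() == 9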
| 662 | 1 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Any = tempfile.mkdtemp()
A : int = 5
# Realm tok
A : Dict = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""test""",
"""question""",
"""this""",
"""is""",
"""the""",
"""first""",
"""second""",
"""third""",
"""fourth""",
"""fifth""",
"""record""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
A : Dict = os.path.join(self.tmpdirname, """realm_tokenizer""" )
os.makedirs(lowerCamelCase__, exist_ok=lowerCamelCase__ )
A : str = os.path.join(lowerCamelCase__, VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
A : Tuple = os.path.join(self.tmpdirname, """realm_block_records""" )
os.makedirs(lowerCamelCase__, exist_ok=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, """realm_tokenizer""" ) )
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : int = RealmConfig(num_block_records=self.num_block_records )
return config
def _lowerCAmelCase ( self ):
A : int = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""question""": ["""foo""", """bar"""],
"""answers""": [["""Foo""", """Bar"""], ["""Bar"""]],
} )
return dataset
def _lowerCAmelCase ( self ):
A : Any = np.array(
[
B"""This is the first record""",
B"""This is the second record""",
B"""This is the third record""",
B"""This is the fourth record""",
B"""This is the fifth record""",
B"""This is a longer longer longer record""",
], dtype=lowerCamelCase__, )
return block_records
def _lowerCAmelCase ( self ):
A : Optional[int] = RealmRetriever(
block_records=self.get_dummy_block_records(), tokenizer=self.get_tokenizer(), )
return retriever
def _lowerCAmelCase ( self ):
A : Union[str, Any] = self.get_config()
A : int = self.get_dummy_retriever()
A : Tuple = retriever.tokenizer
A : str = np.array([0, 3], dtype="""long""" )
A : Tuple = tokenizer(["""Test question"""] ).input_ids
A : int = tokenizer(
["""the fourth"""], add_special_tokens=lowerCamelCase__, return_token_type_ids=lowerCamelCase__, return_attention_mask=lowerCamelCase__, ).input_ids
A : List[str] = config.reader_seq_len
A , A , A , A : Optional[int] = retriever(
lowerCamelCase__, lowerCamelCase__, answer_ids=lowerCamelCase__, max_length=lowerCamelCase__, return_tensors="""np""" )
self.assertEqual(len(lowerCamelCase__ ), 2 )
self.assertEqual(len(lowerCamelCase__ ), 2 )
self.assertEqual(len(lowerCamelCase__ ), 2 )
self.assertEqual(concat_inputs.input_ids.shape, (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape, (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ), ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""], )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ), ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""], )
def _lowerCAmelCase ( self ):
A : Tuple = self.get_config()
A : Tuple = self.get_dummy_retriever()
A : str = retriever.tokenizer
A : Union[str, Any] = np.array([0, 3, 5], dtype="""long""" )
A : Optional[int] = tokenizer(["""Test question"""] ).input_ids
A : Union[str, Any] = tokenizer(
["""the fourth""", """longer longer"""], add_special_tokens=lowerCamelCase__, return_token_type_ids=lowerCamelCase__, return_attention_mask=lowerCamelCase__, ).input_ids
A : Optional[int] = config.reader_seq_len
A , A , A , A : Tuple = retriever(
lowerCamelCase__, lowerCamelCase__, answer_ids=lowerCamelCase__, max_length=lowerCamelCase__, return_tensors="""np""" )
self.assertEqual([False, True, True], lowerCamelCase__ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], lowerCamelCase__ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Tuple = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname, """realm_block_records""" ) )
# Test local path
A : Union[str, Any] = retriever.from_pretrained(os.path.join(self.tmpdirname, """realm_block_records""" ) )
self.assertEqual(retriever.block_records[0], B"""This is the first record""" )
# Test mocked remote path
with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download:
A : Any = os.path.join(
os.path.join(self.tmpdirname, """realm_block_records""" ), _REALM_BLOCK_RECORDS_FILENAME )
A : Dict = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" )
self.assertEqual(retriever.block_records[0], B"""This is the first record""" )
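# Side note (illustrative): the block records above are a numpy object array
# of byte strings, so an integer id array indexes the retrieved blocks
# directly, exactly as the retriever does with `retrieved_block_ids`.
_demo_blocks = np.array([b"first", b"second", b"third"], dtype=object )
assert list(_demo_blocks[np.array([0, 2] )] ) == [b"first", b"third"]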
| 662 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[Any] = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = "bit"
__lowerCamelCase : Union[str, Any] = ["preactivation", "bottleneck"]
__lowerCamelCase : Union[str, Any] = ["SAME", "VALID"]
def __init__( self, lowerCamelCase__=3, lowerCamelCase__=64, lowerCamelCase__=[256, 512, 1024, 2048], lowerCamelCase__=[3, 4, 6, 3], lowerCamelCase__="preactivation", lowerCamelCase__="relu", lowerCamelCase__=None, lowerCamelCase__=32, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=1, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A : List[Any] = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
A : Dict = num_channels
A : List[Any] = embedding_size
A : Optional[Any] = hidden_sizes
A : str = depths
A : str = layer_type
A : Union[str, Any] = hidden_act
A : Any = global_padding
A : Optional[int] = num_groups
A : Dict = drop_path_rate
A : List[Any] = embedding_dynamic_padding
A : List[Any] = output_stride
A : Union[str, Any] = width_factor
A : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )]
A , A : Any = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
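# Illustrative re-derivation of the stage_names construction above for the
# default depths [3, 4, 6, 3]: one "stem" entry plus one entry per stage.
_demo_depths = [3, 4, 6, 3]
assert ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(_demo_depths ) + 1 )] == [
    """stem""",
    """stage1""",
    """stage2""",
    """stage3""",
    """stage4""",
]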
| 662 | 1 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=16, lowerCamelCase__=2, lowerCamelCase__=0.02, lowerCamelCase__=4, ):
A : Any = parent
A : int = batch_size
A : Union[str, Any] = seq_length
A : Any = is_training
A : Optional[Any] = use_attention_mask
A : Dict = use_token_type_ids
A : Optional[int] = use_labels
A : Tuple = vocab_size
A : Optional[Any] = hidden_size
A : List[Any] = num_hidden_layers
A : int = num_attention_heads
A : List[str] = intermediate_size
A : str = hidden_act
A : List[Any] = hidden_dropout_prob
A : int = attention_probs_dropout_prob
A : Optional[int] = max_position_embeddings
A : Any = type_vocab_size
A : str = type_sequence_label_size
A : Optional[Any] = initializer_range
A : List[Any] = num_choices
def _lowerCAmelCase ( self ):
A : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : str = None
if self.use_attention_mask:
A : Any = random_attention_mask([self.batch_size, self.seq_length] )
A : List[str] = None
if self.use_token_type_ids:
A : List[str] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
A : str = RobertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def _lowerCAmelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _lowerCAmelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : str = True
__lowerCamelCase : Dict = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCAmelCase ( self ):
A : Dict = FlaxRobertaModelTester(self )
@slow
def _lowerCAmelCase ( self ):
for model_class_name in self.all_model_classes:
A : int = model_class_name.from_pretrained("""roberta-base""", from_pt=lowerCamelCase__ )
A : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
| 662 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=50, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=None, ):
A : List[str] = parent
A : List[str] = batch_size
A : Optional[int] = seq_length
A : Optional[int] = is_training
A : Tuple = use_input_mask
A : Optional[Any] = vocab_size
A : str = hidden_size
A : Any = num_hidden_layers
A : List[Any] = num_attention_heads
A : Optional[int] = intermediate_size
A : int = hidden_act
A : Dict = hidden_dropout_prob
A : Optional[Any] = attention_probs_dropout_prob
A : List[Any] = max_position_embeddings
A : int = initializer_range
A : Tuple = use_labels
A : List[str] = scope
def _lowerCAmelCase ( self ):
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : int = None
if self.use_input_mask:
A : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : List[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCAmelCase ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, )
def _lowerCAmelCase ( self ):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : str = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )
A : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : List[str] = True
A : Union[str, Any] = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, )
A : Optional[Any] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : Union[str, Any] = True
A : Optional[int] = True
A : Optional[int] = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
# first forward pass
A : int = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__, )
A : List[str] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size )
A : int = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
A : List[str] = torch.cat([input_ids, next_tokens], dim=-1 )
A : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1 )
A : List[str] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
# select random slice
A : Any = ids_tensor((1,), output_from_past.shape[-1] ).item()
A : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
A : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, ):
A : Optional[int] = BertGenerationDecoder(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : List[str] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self ):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__lowerCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self ):
A : Any = BertGenerationEncoderTester(self )
A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A , A , A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
A : Any = """bert"""
self.model_tester.create_and_check_model(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        # exercise the default (None) attention-mask path
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, )
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
A : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(lowerCamelCase__ )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : Optional[int] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Union[str, Any] = model(lowerCamelCase__ )[0]
A : List[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Tuple = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Dict = model(lowerCamelCase__ )[0]
A : List[str] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Optional[Any] = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
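if is_torch_available():
    # Illustrative shape check for the cached-decoding test above: appending
    # 3 new tokens to a (batch, seq) id tensor grows only the sequence axis.
    assert torch.cat([torch.ones(13, 7 ), torch.ones(13, 3 )], dim=-1 ).shape == (13, 10)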
| 662 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=24, lowerCamelCase__=2, lowerCamelCase__=6, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=16, lowerCamelCase__=2, lowerCamelCase__=0.02, lowerCamelCase__=3, lowerCamelCase__=None, lowerCamelCase__=1000, ):
A : Optional[int] = parent
A : Any = batch_size
A : Any = seq_length
A : Dict = is_training
A : Optional[Any] = use_input_mask
A : int = use_token_type_ids
A : List[str] = use_labels
A : Any = vocab_size
A : int = hidden_size
A : List[Any] = num_hidden_layers
A : Union[str, Any] = num_attention_heads
A : Any = intermediate_size
A : Optional[int] = hidden_act
A : List[Any] = hidden_dropout_prob
A : Dict = attention_probs_dropout_prob
A : int = max_position_embeddings
A : Optional[int] = type_vocab_size
A : List[Any] = type_sequence_label_size
A : List[Any] = initializer_range
A : str = num_labels
A : Any = scope
A : List[str] = range_bbox
def _lowerCAmelCase ( self ):
A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A : List[str] = bbox[i, j, 3]
A : Union[str, Any] = bbox[i, j, 1]
A : str = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A : Optional[Any] = bbox[i, j, 2]
A : Optional[Any] = bbox[i, j, 0]
A : Optional[int] = t
A : Optional[Any] = None
if self.use_input_mask:
A : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
A : Union[str, Any] = None
if self.use_token_type_ids:
A : str = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
A : int = None
A : Optional[Any] = None
if self.use_labels:
A : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size )
A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
A : Dict = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def _lowerCAmelCase ( self ):
return LiltConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
A : Any = LiltModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : int = model(lowerCamelCase__, bbox=lowerCamelCase__, attention_mask=lowerCamelCase__, token_type_ids=lowerCamelCase__ )
A : Tuple = model(lowerCamelCase__, bbox=lowerCamelCase__, token_type_ids=lowerCamelCase__ )
A : Union[str, Any] = model(lowerCamelCase__, bbox=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
A : Tuple = self.num_labels
A : Optional[Any] = LiltForTokenClassification(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : int = model(
lowerCamelCase__, bbox=lowerCamelCase__, attention_mask=lowerCamelCase__, token_type_ids=lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
A : List[Any] = LiltForQuestionAnswering(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Optional[int] = model(
lowerCamelCase__, bbox=lowerCamelCase__, attention_mask=lowerCamelCase__, token_type_ids=lowerCamelCase__, start_positions=lowerCamelCase__, end_positions=lowerCamelCase__, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowerCamelCase : List[str] = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Tuple = False
__lowerCamelCase : List[str] = False
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
return True
def _lowerCAmelCase ( self ):
A : int = LiltModelTester(self )
A : Dict = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A : Union[str, Any] = type
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : str = LiltModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Optional[int] = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(lowerCamelCase__ )
A : Tuple = torch.tensor([[1, 2]], device=lowerCamelCase__ )
A : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=lowerCamelCase__ )
# forward pass
with torch.no_grad():
A : Any = model(input_ids=lowerCamelCase__, bbox=lowerCamelCase__ )
A : Optional[int] = torch.Size([1, 2, 768] )
A : Dict = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=lowerCamelCase__, )
self.assertTrue(outputs.last_hidden_state.shape, lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], lowerCamelCase__, atol=1e-3 ) )
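# Illustrative version of the bbox-legalisation loop in the tester above:
# coordinates are swapped so (x0, y0) <= (x1, y1) holds for every box.
_demo_box = [8, 9, 2, 3]  # x0, y0, x1, y1 with both pairs inverted
if _demo_box[3] < _demo_box[1]:
    _demo_box[1] , _demo_box[3] = _demo_box[3] , _demo_box[1]
if _demo_box[2] < _demo_box[0]:
    _demo_box[0] , _demo_box[2] = _demo_box[2] , _demo_box[0]
assert _demo_box == [2, 3, 8, 9]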
| 662 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : str = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384}
A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Dict = size
# Default value set here for backwards compatibility where the value in config is None
A : Dict = crop_pct if crop_pct is not None else 224 / 256
A : Optional[int] = resample
A : List[str] = do_rescale
A : Tuple = rescale_factor
A : Optional[int] = do_normalize
A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
A : List[str] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A : int = int(shortest_edge / crop_pct )
A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Dict = do_resize if do_resize is not None else self.do_resize
A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
A : str = resample if resample is not None else self.resample
A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Dict = do_normalize if do_normalize is not None else self.do_normalize
A : List[str] = image_mean if image_mean is not None else self.image_mean
A : Optional[Any] = image_std if image_std is not None else self.image_std
A : Optional[Any] = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
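# Worked example (illustrative) of the crop_pct branch above: with
# size={"shortest_edge": 224} and the default crop_pct of 224 / 256 = 0.875,
# the image is first resized so its shortest edge is int(224 / 0.875) == 256
# and then center-cropped to 224x224; at 384 or larger it is simply warped
# to (edge, edge) with no crop.
assert int(224 / (224 / 256) ) == 256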
| 662 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Optional[int] = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
SCREAMING_SNAKE_CASE_:Tuple = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
SCREAMING_SNAKE_CASE_:Optional[int] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : int = "whisper"
__lowerCamelCase : Any = ["past_key_values"]
__lowerCamelCase : Dict = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self, lowerCamelCase__=5_1865, lowerCamelCase__=80, lowerCamelCase__=6, lowerCamelCase__=4, lowerCamelCase__=6, lowerCamelCase__=4, lowerCamelCase__=1536, lowerCamelCase__=1536, lowerCamelCase__=0.0, lowerCamelCase__=0.0, lowerCamelCase__=5_0257, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__="gelu", lowerCamelCase__=256, lowerCamelCase__=0.0, lowerCamelCase__=0.0, lowerCamelCase__=0.0, lowerCamelCase__=0.02, lowerCamelCase__=False, lowerCamelCase__=1500, lowerCamelCase__=448, lowerCamelCase__=5_0256, lowerCamelCase__=5_0256, lowerCamelCase__=5_0256, lowerCamelCase__=None, lowerCamelCase__=[220, 5_0256], lowerCamelCase__=False, lowerCamelCase__=256, lowerCamelCase__=False, lowerCamelCase__=0.05, lowerCamelCase__=10, lowerCamelCase__=2, lowerCamelCase__=0.0, lowerCamelCase__=10, lowerCamelCase__=0, lowerCamelCase__=7, **lowerCamelCase__, ):
A : Any = vocab_size
A : Union[str, Any] = num_mel_bins
A : Any = d_model
A : int = encoder_layers
A : Any = encoder_attention_heads
A : Optional[Any] = decoder_layers
A : Union[str, Any] = decoder_attention_heads
A : Optional[int] = decoder_ffn_dim
A : List[str] = encoder_ffn_dim
A : List[Any] = dropout
A : int = attention_dropout
A : List[str] = activation_dropout
A : Any = activation_function
A : Optional[Any] = init_std
A : Any = encoder_layerdrop
A : List[str] = decoder_layerdrop
A : Union[str, Any] = use_cache
A : int = encoder_layers
A : Any = scale_embedding # scale factor will be sqrt(d_model) if True
A : Optional[Any] = max_source_positions
A : int = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
A : Optional[Any] = classifier_proj_size
A : List[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A : Union[str, Any] = apply_spec_augment
A : Optional[int] = mask_time_prob
A : int = mask_time_length
A : str = mask_time_min_masks
A : List[Any] = mask_feature_prob
A : Any = mask_feature_length
A : Dict = mask_feature_min_masks
A : Optional[int] = median_filter_width
super().__init__(
pad_token_id=lowerCamelCase__, bos_token_id=lowerCamelCase__, eos_token_id=lowerCamelCase__, is_encoder_decoder=lowerCamelCase__, decoder_start_token_id=lowerCamelCase__, suppress_tokens=lowerCamelCase__, begin_suppress_tokens=lowerCamelCase__, **lowerCamelCase__, )
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@property
def _lowerCAmelCase ( self ):
A : Tuple = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
A : Dict = {0: """batch"""}
else:
A : List[str] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase__, direction="""inputs""" )
return common_inputs
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = -1, lowerCamelCase__ = -1, lowerCamelCase__ = False, lowerCamelCase__ = None, lowerCamelCase__ = 2_2050, lowerCamelCase__ = 5.0, lowerCamelCase__ = 220, ):
A : List[Any] = OrderedDict()
A : Dict = OnnxConfig.generate_dummy_inputs(
self, preprocessor=preprocessor.feature_extractor, batch_size=lowerCamelCase__, framework=lowerCamelCase__, sampling_rate=lowerCamelCase__, time_duration=lowerCamelCase__, frequency=lowerCamelCase__, )
A : Optional[Any] = encoder_inputs["""input_features"""].shape[2]
A : List[Any] = encoder_sequence_length // 2 if self.use_past else seq_length
A : Dict = super().generate_dummy_inputs(
preprocessor.tokenizer, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
A : str = encoder_inputs.pop("""input_features""" )
A : Dict = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
A : Dict = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def _lowerCAmelCase ( self ):
return 1e-3
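# --- Illustrative sketch (not part of the original row): suppress-token id
# lists like the ones defined above are typically applied by forcing those
# logits to -inf before sampling; the vocab size below matches the config
# default, everything else is a demo assumption.
import torch

def sketch_suppress_tokens(logits: torch.Tensor, suppress_ids: list) -> torch.Tensor:
    masked = logits.clone()
    masked[..., suppress_ids] = float("-inf")  # suppressed ids can never be sampled
    return masked

assert sketch_suppress_tokens(torch.zeros(1, 51865), [1, 2, 7])[0, 1] == float("-inf")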
| 662 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
A : Dict = """backbone.""" if is_semantic else """"""
A : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
A : Dict = """backbone.""" if is_semantic else """"""
# queries, keys and values
A : Union[str, Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
A : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
A : Optional[int] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
A : int = in_proj_weight[
: config.hidden_size, :
]
A : Any = q_bias
A : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A : Tuple = in_proj_weight[
-config.hidden_size :, :
]
A : Union[str, Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A : str = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
A : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
A : Dict = gamma_a
A : Dict = gamma_a
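# --- Illustrative sketch (not part of the original row): the fused qkv
# projection popped above is sliced along dim 0 into query/key/value blocks
# of `hidden_size` rows each, the same slicing pattern used in read_in_q_k_v;
# hidden_size=4 is an assumed demo value.
import torch

_h = 4
_qkv = torch.arange(3 * _h * _h, dtype=torch.float32).reshape(3 * _h, _h)
_q, _k, _v = _qkv[:_h, :], _qkv[_h : 2 * _h, :], _qkv[-_h:, :]  # same slices as above
assert torch.equal(torch.cat([_q, _k, _v]), _qkv)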
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A : List[str] = dct.pop(_lowerCAmelCase )
A : Optional[Any] = val
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
A : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> str:
"""simple docstring"""
    A : Dict = """rvlcdip""" not in checkpoint_url
A : Union[str, Any] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A : Dict = 1024
A : List[Any] = 4096
A : int = 24
A : int = 16
# labels
if "rvlcdip" in checkpoint_url:
A : List[Any] = 16
A : List[Any] = """huggingface/label-files"""
A : int = """rvlcdip-id2label.json"""
A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
A : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
A : int = idalabel
A : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
A : str = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase )
# load HuggingFace model
A : Any = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase )
model.eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
A : Any = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase )
A : int = prepare_img()
A : Tuple = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
A : str = encoding["""pixel_values"""]
A : Tuple = model(_lowerCAmelCase )
A : Optional[int] = outputs.logits
# verify logits
A : Tuple = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
if has_lm_head:
A : Any = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
A : List[Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 662 | 1 |
import copy
import re
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
__lowerCamelCase : Any = "hp"
__lowerCamelCase : Dict = {}
__lowerCamelCase : Optional[Any] = None
@classmethod
def _lowerCAmelCase ( cls, lowerCamelCase__, lowerCamelCase__ ):
A : Union[str, Any] = prefix
A : Optional[Any] = defaults
cls.build_naming_info()
@staticmethod
def _lowerCAmelCase ( lowerCamelCase__, lowerCamelCase__ ):
if len(lowerCamelCase__ ) == 0:
return ""
A : Union[str, Any] = None
if any(char.isdigit() for char in word ):
raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(lowerCamelCase__ ) + 1 ):
A : Union[str, Any] = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
A : str = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCamelCase__ ):
A : Any = """"""
while integer != 0:
A : Dict = chr(ord("""A""" ) + integer % 10 ) + s
integer //= 10
return s
A : str = 0
while True:
A : Union[str, Any] = word + """#""" + int_to_alphabetic(lowerCamelCase__ )
if sword in info["reverse_short_word"]:
continue
else:
A : Union[str, Any] = sword
break
A : Any = short_word
A : Optional[int] = word
return short_word
@staticmethod
def _lowerCAmelCase ( lowerCamelCase__, lowerCamelCase__ ):
A : Union[str, Any] = param_name.split("""_""" )
A : Optional[Any] = [TrialShortNamer.shortname_for_word(lowerCamelCase__, lowerCamelCase__ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
A : Any = ["""""", """_"""]
for separator in separators:
A : Optional[Any] = separator.join(lowerCamelCase__ )
if shortname not in info["reverse_short_param"]:
A : str = shortname
A : List[str] = param_name
return shortname
return param_name
@staticmethod
def _lowerCAmelCase ( lowerCamelCase__, lowerCamelCase__ ):
A : Any = TrialShortNamer.shortname_for_key(lowerCamelCase__, lowerCamelCase__ )
A : Dict = short_name
A : Tuple = param_name
@classmethod
def _lowerCAmelCase ( cls ):
if cls.NAMING_INFO is not None:
return
A : Tuple = {
"""short_word""": {},
"""reverse_short_word""": {},
"""short_param""": {},
"""reverse_short_param""": {},
}
A : int = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(lowerCamelCase__, lowerCamelCase__ )
A : Optional[Any] = info
@classmethod
def _lowerCAmelCase ( cls, lowerCamelCase__ ):
cls.build_naming_info()
assert cls.PREFIX is not None
A : List[Any] = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
A : Optional[Any] = cls.NAMING_INFO["""short_param"""][k]
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : Tuple = 1 if v else 0
A : Union[str, Any] = """""" if isinstance(lowerCamelCase__, (int, float) ) else """-"""
A : Any = f'''{key}{sep}{v}'''
name.append(lowerCamelCase__ )
return "_".join(lowerCamelCase__ )
@classmethod
def _lowerCAmelCase ( cls, lowerCamelCase__ ):
A : Optional[Any] = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
A : List[str] = []
else:
A : List[str] = repr.split("""_""" )
A : Union[str, Any] = {}
for value in values:
if "-" in value:
A , A : Optional[Any] = value.split("""-""" )
else:
A : Any = re.sub("""[0-9.]""", """""", lowerCamelCase__ )
A : Dict = float(re.sub("""[^0-9.]""", """""", lowerCamelCase__ ) )
A : Optional[Any] = cls.NAMING_INFO["""reverse_short_param"""][p_k]
A : int = p_v
for k in cls.DEFAULTS:
if k not in parameters:
A : Any = cls.DEFAULTS[k]
return parameters
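# --- Illustrative sketch (not part of the original row): the class above
# encodes a hyperparameter dict as a compact run name (prefix + shortened
# keys + values) and parses it back. A minimal standalone version of the
# same round trip, with an assumed fixed key -> short-key table:
_SHORT = {"learning_rate": "lr", "batch_size": "bs"}
_REVERSE = {v: k for k, v in _SHORT.items()}

def sketch_shortname(prefix, params):
    return "_".join([prefix] + [f"{_SHORT[k]}-{v}" for k, v in sorted(params.items())])

def sketch_parse(name):
    tokens = (t.split("-", 1) for t in name.split("_")[1:])
    return {_REVERSE[k]: float(v) for k, v in tokens}

assert sketch_parse(sketch_shortname("hp", {"learning_rate": 0.1, "batch_size": 32})) == {
    "learning_rate": 0.1,
    "batch_size": 32.0,
}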
| 662 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""", lowerCamelCase__, )
super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
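# --- Illustrative note (not part of the original row): in the upstream
# deprecation shim the second argument to warnings.warn above is the
# FutureWarning category; the obfuscated placeholder stands in for it, e.g.
# warnings.warn("CLIPFeatureExtractor is deprecated ...", FutureWarning).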
| 662 | 1 |
import math
import flax.linen as nn
import jax.numpy as jnp
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 , _lowerCAmelCase = 1 , _lowerCAmelCase = 1.0e4 , _lowerCAmelCase = False , _lowerCAmelCase = 1.0 , ) -> jnp.ndarray:
"""simple docstring"""
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
A : int = float(embedding_dim // 2 )
A : Tuple = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
A : Optional[int] = min_timescale * jnp.exp(jnp.arange(_lowerCAmelCase , dtype=jnp.floataa ) * -log_timescale_increment )
A : int = jnp.expand_dims(_lowerCAmelCase , 1 ) * jnp.expand_dims(_lowerCAmelCase , 0 )
# scale embeddings
A : Any = scale * emb
if flip_sin_to_cos:
A : List[str] = jnp.concatenate([jnp.cos(_lowerCAmelCase ), jnp.sin(_lowerCAmelCase )] , axis=1 )
else:
A : Union[str, Any] = jnp.concatenate([jnp.sin(_lowerCAmelCase ), jnp.cos(_lowerCAmelCase )] , axis=1 )
A : Optional[int] = jnp.reshape(_lowerCAmelCase , [jnp.shape(_lowerCAmelCase )[0], embedding_dim] )
return signal
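# --- Illustrative usage (not part of the original row), assuming the helper
# above is exposed under the name used at its call site further down: one
# row per timestep, `embedding_dim` features.
# _demo_emb = get_sinusoidal_embeddings(jnp.arange(4), 8)
# assert _demo_emb.shape == (4, 8)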
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
__lowerCamelCase : int = 32
__lowerCamelCase : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self, lowerCamelCase__ ):
A : Dict = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="""linear_1""" )(lowerCamelCase__ )
A : Union[str, Any] = nn.silu(lowerCamelCase__ )
A : Dict = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="""linear_2""" )(lowerCamelCase__ )
return temb
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
__lowerCamelCase : int = 32
__lowerCamelCase : bool = False
__lowerCamelCase : float = 1
@nn.compact
def __call__( self, lowerCamelCase__ ):
return get_sinusoidal_embeddings(
lowerCamelCase__, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift )
| 662 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(
lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, )
A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths}
A : str = Text(
cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, )
def _lowerCAmelCase ( self ):
# Build iterable dataset
if self.streaming:
A : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A : List[str] = None
A : Dict = None
A : Tuple = None
A : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, )
A : List[str] = self.builder.as_dataset(
split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory )
return dataset
| 662 | 1 |
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
return int((input_a, input_a).count(0 ) == 0 )
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 662 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 662 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = StableUnCLIPPipeline
__lowerCamelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS
__lowerCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
__lowerCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
__lowerCamelCase : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__lowerCamelCase : List[str] = False
def _lowerCAmelCase ( self ):
A : int = 32
A : Dict = embedder_hidden_size
# prior components
torch.manual_seed(0 )
A : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
A : List[str] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase__, projection_dim=lowerCamelCase__, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
torch.manual_seed(0 )
A : Dict = PriorTransformer(
num_attention_heads=2, attention_head_dim=12, embedding_dim=lowerCamelCase__, num_layers=1, )
torch.manual_seed(0 )
A : List[str] = DDPMScheduler(
variance_type="""fixed_small_log""", prediction_type="""sample""", num_train_timesteps=1000, clip_sample=lowerCamelCase__, clip_sample_range=5.0, beta_schedule="""squaredcos_cap_v2""", )
# regular denoising components
torch.manual_seed(0 )
A : Dict = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase__ )
A : Optional[Any] = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
A : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
A : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase__, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
torch.manual_seed(0 )
A : Union[str, Any] = UNetaDConditionModel(
sample_size=32, in_channels=4, out_channels=4, down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D"""), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="""projection""", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase__, layers_per_block=1, upcast_attention=lowerCamelCase__, use_linear_projection=lowerCamelCase__, )
torch.manual_seed(0 )
A : Optional[Any] = DDIMScheduler(
beta_schedule="""scaled_linear""", beta_start=0.0_0085, beta_end=0.012, prediction_type="""v_prediction""", set_alpha_to_one=lowerCamelCase__, steps_offset=1, )
torch.manual_seed(0 )
A : Tuple = AutoencoderKL()
A : int = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=0 ):
if str(lowerCamelCase__ ).startswith("""mps""" ):
A : List[Any] = torch.manual_seed(lowerCamelCase__ )
else:
A : Dict = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
A : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _lowerCAmelCase ( self ):
A : List[Any] = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase__ )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
A : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
A : str = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""", torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        A : Optional[Any] = pipe("""anime turtle""", generator=lowerCamelCase__, output_type="""np""" )
A : Optional[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A : List[str] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""", torch_dtype=torch.floataa )
A : List[str] = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A : Optional[int] = pipe(
"""anime turtle""", prior_num_inference_steps=2, num_inference_steps=2, output_type="""np""", )
A : str = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 662 |
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int:
"""simple docstring"""
A , A : str = 1, 1
A : List[Any] = []
for i in range(1 , n + 1 ):
A : Optional[int] = prev_numerator + 2 * prev_denominator
A : Any = prev_numerator + prev_denominator
if len(str(_lowerCAmelCase ) ) > len(str(_lowerCAmelCase ) ):
result.append(_lowerCAmelCase )
A : int = numerator
A : int = denominator
return len(_lowerCAmelCase )
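# --- Illustrative note (not part of the original row): the loop above walks
# the convergents of sqrt(2) via n' = n + 2d, d' = n + d and counts those
# whose numerator has more digits than the denominator. Standalone check of
# the recurrence with fractions.Fraction:
from fractions import Fraction

def sketch_convergents(k):
    n, d, out = 1, 1, []
    for _ in range(k):
        n, d = n + 2 * d, n + d  # same recurrence as the loop above
        out.append(Fraction(n, d))
    return out

assert sketch_convergents(3) == [Fraction(3, 2), Fraction(7, 5), Fraction(17, 12)]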
if __name__ == "__main__":
print(F"""{solution() = }""")
| 662 | 1 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __lt__( self, lowerCamelCase__ ):
return self[-1] < other[-1]
def __eq__( self, lowerCamelCase__ ):
return self[-1] == other[-1]
def __UpperCamelCase ( _lowerCAmelCase ) -> list:
"""simple docstring"""
A : list[Stack] = []
# sort into stacks
for element in collection:
A : Tuple = Stack([element] )
A : Any = bisect_left(_lowerCAmelCase , _lowerCAmelCase )
if i != len(_lowerCAmelCase ):
stacks[i].append(_lowerCAmelCase )
else:
stacks.append(_lowerCAmelCase )
# use a heap-based merge to merge stack efficiently
A : Union[str, Any] = merge(*(reversed(_lowerCAmelCase ) for stack in stacks) )
return collection
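# --- Illustrative note (not part of the original row): each element lands on
# the leftmost stack whose top is >= the element (bisect_left over stacks
# ordered by their last item), and the reversed stacks are then heap-merged.
# For [5, 1, 4, 2] the stacks become [5, 1] and [4, 2], and the merge of
# their reversals [1, 5] and [2, 4] yields [1, 2, 4, 5].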
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:List[str] = input("""Enter numbers separated by a comma:\n""").strip()
SCREAMING_SNAKE_CASE_:Any = [int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
| 662 |
import re
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
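# --- Illustrative usage (not part of the original row): with the body's
# `dna` placeholder bound to the input strand, the translation maps A<->T
# and C<->G, e.g. "ATCG" -> "TAGC" and "GTAT" -> "CATA".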
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 1 |
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=None, lowerCamelCase__=None ):
A : str = data
A : Union[str, Any] = previous
A : Dict = next_node
def __str__( self ):
return f'''{self.data}'''
def _lowerCAmelCase ( self ):
return self.data
def _lowerCAmelCase ( self ):
return self.next
def _lowerCAmelCase ( self ):
return self.previous
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__ ):
A : Tuple = head
def __iter__( self ):
return self
def _lowerCAmelCase ( self ):
if not self.current:
raise StopIteration
else:
A : Optional[Any] = self.current.get_data()
A : List[str] = self.current.get_next()
return value
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self ):
A : int = None # First node in list
A : int = None # Last node in list
def __str__( self ):
A : Union[str, Any] = self.head
A : Dict = []
while current is not None:
nodes.append(current.get_data() )
A : Tuple = current.get_next()
return " ".join(str(lowerCamelCase__ ) for node in nodes )
def __contains__( self, lowerCamelCase__ ):
A : str = self.head
while current:
if current.get_data() == value:
return True
A : Dict = current.get_next()
return False
def __iter__( self ):
return LinkedListIterator(self.head )
def _lowerCAmelCase ( self ):
if self.head:
return self.head.get_data()
return None
def _lowerCAmelCase ( self ):
if self.tail:
return self.tail.get_data()
return None
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if self.head is None:
A : int = node
A : int = node
else:
self.insert_before_node(self.head, lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if self.head is None:
self.set_head(lowerCamelCase__ )
else:
self.insert_after_node(self.tail, lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = Node(lowerCamelCase__ )
if self.head is None:
self.set_head(lowerCamelCase__ )
else:
self.set_tail(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
A : List[Any] = node
A : str = node.previous
if node.get_previous() is None:
A : List[str] = node_to_insert
else:
A : int = node_to_insert
A : List[str] = node_to_insert
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
A : Any = node
A : Any = node.next
if node.get_next() is None:
A : str = node_to_insert
else:
A : List[Any] = node_to_insert
A : Any = node_to_insert
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
A : Optional[int] = 1
A : Optional[Any] = Node(lowerCamelCase__ )
A : List[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(lowerCamelCase__, lowerCamelCase__ )
return
current_position += 1
A : str = node.next
self.insert_after_node(self.tail, lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Dict = self.head
while node:
if node.get_data() == item:
return node
A : List[str] = node.get_next()
raise Exception("""Node not found""" )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if (node := self.get_node(lowerCamelCase__ )) is not None:
if node == self.head:
A : Union[str, Any] = self.head.get_next()
if node == self.tail:
A : Optional[Any] = self.tail.get_previous()
self.remove_node_pointers(lowerCamelCase__ )
@staticmethod
def _lowerCAmelCase ( lowerCamelCase__ ):
if node.get_next():
A : Dict = node.previous
if node.get_previous():
A : List[str] = node.next
A : Dict = None
A : List[str] = None
def _lowerCAmelCase ( self ):
return self.head is None
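# --- Illustrative sketch (not part of the original row): removing a node
# rewires previous.next and next.previous, the same pointer surgery done in
# remove_node_pointers above. Minimal standalone demonstration:
class _DemoNode:
    def __init__(self, data):
        self.data, self.previous, self.next = data, None, None

_a, _b, _c = _DemoNode(1), _DemoNode(2), _DemoNode(3)
_a.next, _b.previous, _b.next, _c.previous = _b, _a, _c, _b  # a <-> b <-> c
_a.next, _c.previous = _b.next, _b.previous  # unlink b
assert _a.next is _c and _c.previous is _a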
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 |
from __future__ import annotations
SCREAMING_SNAKE_CASE_:Tuple = """#"""
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self ):
A : dict = {}
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = self._trie
for char in text:
if char not in trie:
A : str = {}
A : str = trie[char]
A : Optional[int] = True
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Dict = self._trie
for char in prefix:
if char in trie:
A : Optional[Any] = trie[char]
else:
return []
return self._elements(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : int = []
for c, v in d.items():
A : List[Any] = [""" """] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )]
result.extend(lowerCamelCase__ )
return tuple(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_:Any = Trie()
SCREAMING_SNAKE_CASE_:Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def __UpperCamelCase ( _lowerCAmelCase ) -> tuple:
"""simple docstring"""
A : List[str] = trie.find_word(_lowerCAmelCase )
return tuple(string + word for word in suffixes )
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 662 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_:Any = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 662 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = bnb_quantization_config.load_in_abit
A : int = bnb_quantization_config.load_in_abit
    if bnb_quantization_config.load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
    if bnb_quantization_config.load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
A : Any = []
# custom device map
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1:
A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A : int = get_keys_to_not_convert(_lowerCAmelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowerCAmelCase )
A : Optional[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A : Dict = []
A : Tuple = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowerCAmelCase )
# compatibility with peft
A : Union[str, Any] = load_in_abit
A : Tuple = load_in_abit
A : List[str] = get_parameter_device(_lowerCAmelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
# convert param to the right dtype
A : Tuple = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowerCAmelCase ):
param.to(_lowerCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
            f'''The model device type is {model_device.type}. However, cuda is needed for quantization. '''
            """We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A : str = replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
A : Optional[Any] = get_quantized_model_device_map(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A : Tuple = True
A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[int]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A : Optional[int] = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A : Tuple = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A : Any = {}
A : List[str] = special_dtypes
A : Any = no_split_module_classes
A : Union[str, Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A : Tuple = get_balanced_memory(
_lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , )
A : int = max_memory
A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# check if don't have any quantized module on the cpu
A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A : Optional[int] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
A : Optional[Any] = []
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> int:
"""simple docstring"""
A : Optional[int] = False
for name, module in model.named_children():
if current_key_name is None:
A : int = []
current_key_name.append(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A : Dict = """.""".join(_lowerCAmelCase )
A : Optional[Any] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A : Dict = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A : Optional[Any] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
A : Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A : Any = module.weight.data
if module.bias is not None:
A : Any = module.bias.data
bnb_module.requires_grad_(_lowerCAmelCase )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Dict = True
if len(list(module.children() ) ) > 0:
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __UpperCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
with init_empty_weights():
A : Tuple = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
A : Optional[int] = find_tied_parameters(_lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A : Optional[int] = sum(_lowerCAmelCase , [] )
A : Tuple = len(_lowerCAmelCase ) > 0
# Check if it is a base model
A : List[str] = False
if hasattr(_lowerCAmelCase , """base_model_prefix""" ):
A : Optional[Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A : str = list(model.named_children() )
A : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
A : int = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
A : Optional[Any] = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase )
# remove ".weight" from the keys
A : Union[str, Any] = [""".weight""", """.bias"""]
A : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A : List[str] = name.replace(_lowerCAmelCase , """""" )
filtered_module_names.append(_lowerCAmelCase )
return filtered_module_names
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
for m in model.modules():
if isinstance(_lowerCAmelCase , bnb.nn.Linearabit ):
return True
return False
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
return next(parameter.parameters() ).device
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase )
A : Tuple = param_name
A : Union[str, Any] = model
if "." in tensor_name:
A : int = tensor_name.split(""".""" )
for split in splits[:-1]:
A : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
A : Optional[Any] = new_module
A : List[str] = splits[-1]
# offload weights
A : Optional[int] = False
offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase , )
else:
offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
offload_weight(_lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase )
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , """meta""" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
| 662 | 1 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
return "".join(sorted(_lowerCAmelCase ) )
def __UpperCamelCase ( _lowerCAmelCase ) -> list[str]:
"""simple docstring"""
return word_by_signature[signature(_lowerCAmelCase )]
SCREAMING_SNAKE_CASE_:str = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
SCREAMING_SNAKE_CASE_:Union[str, Any] = sorted({word.strip().lower() for word in data.splitlines()})
SCREAMING_SNAKE_CASE_:List[str] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:str = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
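# --- Illustrative check (not part of the original row): grouping by
# sorted-letter signature puts anagrams under one key, independent of the
# words.txt contents used above:
_demo_groups: dict = {}
for _w in ("listen", "silent", "enlist", "google"):
    _demo_groups.setdefault("".join(sorted(_w)), []).append(_w)
assert _demo_groups["eilnst"] == ["listen", "silent", "enlist"]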
| 662 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
A : Tuple = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
A : Dict = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(_lowerCAmelCase )
DownloadCommand.register_subcommand(_lowerCAmelCase )
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
RunCommand.register_subcommand(_lowerCAmelCase )
ServeCommand.register_subcommand(_lowerCAmelCase )
UserCommands.register_subcommand(_lowerCAmelCase )
AddNewModelCommand.register_subcommand(_lowerCAmelCase )
AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase )
LfsCommands.register_subcommand(_lowerCAmelCase )
PTtoTFCommand.register_subcommand(_lowerCAmelCase )
# Let's go
A : Tuple = parser.parse_args()
if not hasattr(_lowerCAmelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
A : Any = args.func(_lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 662 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_:Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = XLMProphetNetTokenizer
__lowerCamelCase : str = False
__lowerCamelCase : Optional[Any] = True
def _lowerCAmelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
A : int = XLMProphetNetTokenizer(lowerCamelCase__, keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : str = """[PAD]"""
A : Dict = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ), lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ), lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], """[PAD]""" )
self.assertEqual(vocab_keys[1], """[CLS]""" )
self.assertEqual(vocab_keys[-1], """j""" )
self.assertEqual(len(lowerCamelCase__ ), 1012 )
def _lowerCAmelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size, 1012 )
def _lowerCAmelCase ( self ):
A : Dict = XLMProphetNetTokenizer(lowerCamelCase__, keep_accents=lowerCamelCase__ )
A : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase__, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
A : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase__, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
], )
A : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
], )
A : List[str] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
], )
@cached_property
def _lowerCAmelCase ( self ):
return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )
@slow
def _lowerCAmelCase ( self ):
A : Tuple = """Hello World!"""
A : str = [3_5389, 6672, 49, 2]
self.assertListEqual(lowerCamelCase__, self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def _lowerCAmelCase ( self ):
# fmt: off
A : int = {"""input_ids""": [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__, model_name="""microsoft/xprophetnet-large-wiki100-cased""", revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""", )
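# Editorial aside (hypothetical sketch, not the tested XLMProphetNet API): the
# assertions above hinge on one detail — ids from the underlying sentencepiece
# model are shifted by a constant `fairseq_offset` so the low ids stay reserved
# for special tokens. A minimal, self-contained model of that mapping:
class OffsetVocab:
    def __init__(self, sp_ids: dict, offset: int, unk_id: int):
        self.sp_ids = sp_ids    # token -> raw sentencepiece id
        self.offset = offset    # constant shift applied to model ids
        self.unk_id = unk_id    # raw id used for out-of-vocabulary tokens
    def convert_tokens_to_ids(self, tokens):
        return [self.sp_ids.get(t, self.unk_id) + self.offset for t in tokens]
    def convert_ids_to_tokens(self, ids):
        reverse = {v: k for k, v in self.sp_ids.items()}
        return [reverse.get(i - self.offset, "[UNK]") for i in ids]

# round-trip check in the same spirit as the test above
_v = OffsetVocab({"▁I": 8, "▁was": 21}, offset=12, unk_id=-9)
assert _v.convert_ids_to_tokens(_v.convert_tokens_to_ids(["▁I", "▁was", "oov"])) == ["▁I", "▁was", "[UNK]"]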
| 662 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
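# Editorial sketch (hypothetical names, not the actual `_LazyModule` source):
# the machinery above is essentially PEP 562's module-level `__getattr__`,
# which defers each submodule import until the attribute is first touched.
import importlib

_LAZY_ATTR_TO_MODULE = {
    "BlenderbotConfig": "configuration_blenderbot",
    "BlenderbotModel": "modeling_blenderbot",
}

def _lazy_getattr(name):
    """Resolve `name` by importing its defining submodule on first access."""
    if name not in _LAZY_ATTR_TO_MODULE:
        raise AttributeError(name)
    submodule = importlib.import_module("." + _LAZY_ATTR_TO_MODULE[name], __package__)
    return getattr(submodule, name)

# Binding `__getattr__ = _lazy_getattr` at module scope would activate the hook;
# it is left unbound here so the real `_LazyModule` above stays in charge.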
| 662 | 1 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the fewest perfect squares that sum to `number` (dynamic programming).
    >>> minimum_squares_to_represent_a_number(25)
    1
    >>> minimum_squares_to_represent_a_number(37)
    2
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        # best over all squares j**2 <= i of taking that square plus the sub-answer
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
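# A hedged aside, not part of the snippet above: number theory gives the same
# answer without a DP table. Lagrange's four-square theorem caps the result at
# 4, and Legendre's three-square theorem pins down exactly when 4 squares are
# needed (n of the form 4**a * (8*b + 7)). Sketch built on those classical
# results (relies on the `math` import above):
def minimum_squares_closed_form(n: int) -> int:
    """Classify n >= 1 as needing 1, 2, 3 or 4 perfect squares."""
    if math.isqrt(n) ** 2 == n:
        return 1
    m = n
    while m % 4 == 0:
        m //= 4
    if m % 8 == 7:
        return 4
    # otherwise the answer is 2 iff n is a sum of two positive squares, else 3
    for i in range(1, math.isqrt(n) + 1):
        rest = n - i * i
        if math.isqrt(rest) ** 2 == rest:
            return 2
    return 3

# consistency check against the DP version defined above
assert all(minimum_squares_closed_form(n) == minimum_squares_to_represent_a_number(n) for n in range(1, 50))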
| 662 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making: repeatedly take the largest denomination that still fits."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take this denomination as long as it still fits
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the coin to the "answer" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (y/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
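# Worth flagging (an editorial note, not part of the original script): the
# greedy strategy above is only optimal for canonical coin systems such as the
# Indian denominations used here. For an arbitrary system, a small dynamic
# program gives the guaranteed minimum. Sketch, with hypothetical names:
def find_minimum_change_dp(denominations: list, total: int) -> list:
    """Minimum-coin change via DP; works even where greedy fails."""
    best = [0] + [None] * total     # best[v] = fewest coins summing to v
    choice = [0] * (total + 1)      # coin used to reach v on an optimal path
    for v in range(1, total + 1):
        for coin in denominations:
            if coin <= v and best[v - coin] is not None:
                if best[v] is None or best[v - coin] + 1 < best[v]:
                    best[v] = best[v - coin] + 1
                    choice[v] = coin
    coins = []
    while total > 0 and best[total] is not None:
        coins.append(choice[total])
        total -= choice[total]
    return coins

# e.g. with denominations [1, 3, 4] and total 6, greedy yields [4, 1, 1]
# (3 coins) but the DP finds the optimal [3, 3] (2 coins)
assert sorted(find_minimum_change_dp([1, 3, 4], 6)) == [3, 3]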
| 662 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig ( PretrainedConfig ):
    """Configuration class storing the hyperparameters of a Decision Transformer model."""
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs, ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
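# The `attribute_map` above lets callers read the generic config names
# (`max_position_embeddings`, `num_attention_heads`, ...) while the GPT-2-style
# fields (`n_positions`, `n_head`, ...) hold the values. A minimal sketch of
# that aliasing mechanism (hypothetical class, not the PretrainedConfig source,
# which additionally remaps writes):
class AliasedConfig:
    attribute_map = {"max_position_embeddings": "n_positions"}
    def __init__(self, n_positions):
        self.n_positions = n_positions
    def __getattr__(self, name):
        # only reached when normal attribute lookup fails, i.e. for aliases
        alias = type(self).attribute_map.get(name)
        if alias is None:
            raise AttributeError(name)
        return getattr(self, alias)

assert AliasedConfig(n_positions=1024).max_position_embeddings == 1024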
| 662 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
build_dir = Path(tmpdirname)
src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
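# For context on the `merges` file written above: each line names a pair of
# symbols that BPE fuses, in priority order. A minimal sketch of applying such
# merges to one word (a hypothetical helper, much simplified relative to the
# real FSMT tokenizer — no caching, no vocabulary lookup):
def apply_bpe_merges(word, merges):
    """Greedily apply ranked merges to the symbols of one word."""
    ranks = {tuple(m.split()[:2]): i for i, m in enumerate(merges) if m}
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        pairs = [(ranks.get((a, b), float("inf")), i) for i, (a, b) in enumerate(zip(symbols, symbols[1:]))]
        rank, i = min(pairs)
        if rank == float("inf"):
            break  # no mergeable pair left
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols

# with the demo merges above, "lower" collapses to the vocab entries low + er</w>
_demo_merges = ["l o 123", "lo w 1456", "e r</w> 1789"]
assert apply_bpe_merges("lower", _demo_merges) == ["low", "er</w>"]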
| 662 | 1 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig ( folder_based_builder.FolderBasedBuilderConfig ):
    """Builder Config for AudioFolder."""
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder ( folder_based_builder.FolderBasedBuilder ):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio" , label_column="label" )
AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
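# A small illustration of what the folder-based builder ultimately does: keep
# only files with a supported extension and infer each file's label from its
# parent directory. Hypothetical sketch, not the datasets-internal code:
from pathlib import PurePosixPath

def iter_labeled_audio_files(paths):
    """Yield (path, label) pairs for files whose suffix is in AUDIO_EXTENSIONS."""
    for path in paths:
        p = PurePosixPath(path)
        if p.suffix.lower() in AUDIO_EXTENSIONS:
            yield path, p.parent.name  # class label = containing folder

assert list(iter_labeled_audio_files(["train/dog/bark.wav", "train/notes.txt"])) == [("train/dog/bark.wav", "dog")]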
| 662 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Optional[int] = Path("""data_bin""" )
A : Optional[Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowerCAmelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(_lowerCAmelCase )
A : Any = xmod.model.encoder.sentence_encoder
A : Optional[int] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _lowerCAmelCase )
A : int = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
A : Any = xmod_sent_encoder.embed_tokens.weight
A : int = xmod_sent_encoder.embed_positions.weight
A : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
A : Dict = xmod_sent_encoder.layernorm_embedding.weight
A : int = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A : str = model.roberta.encoder.layer[i]
A : Tuple = xmod_sent_encoder.layers[i]
# self attention
A : Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
A : List[str] = xmod_layer.self_attn.q_proj.weight
A : Optional[int] = xmod_layer.self_attn.q_proj.bias
A : List[Any] = xmod_layer.self_attn.k_proj.weight
A : Union[str, Any] = xmod_layer.self_attn.k_proj.bias
A : Optional[int] = xmod_layer.self_attn.v_proj.weight
A : Dict = xmod_layer.self_attn.v_proj.bias
# self-attention output
A : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
A : Optional[Any] = xmod_layer.self_attn.out_proj.weight
A : Dict = xmod_layer.self_attn.out_proj.bias
A : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight
A : str = xmod_layer.self_attn_layer_norm.bias
# intermediate
A : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
A : Optional[int] = xmod_layer.fca.weight
A : Optional[int] = xmod_layer.fca.bias
# output
A : Dict = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
A : Union[str, Any] = xmod_layer.fca.weight
A : int = xmod_layer.fca.bias
A : List[str] = xmod_layer.final_layer_norm.weight
A : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
A : str = xmod_layer.adapter_layer_norm.weight
A : str = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
A : Optional[int] = bert_output.adapter_modules[lang_code]
A : int = xmod_layer.adapter_modules[lang_code]
A : Optional[Any] = from_adapter.fca.weight
A : Optional[Any] = from_adapter.fca.bias
A : List[str] = from_adapter.fca.weight
A : Any = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A : Dict = xmod_sent_encoder.layer_norm.weight
A : int = xmod_sent_encoder.layer_norm.bias
if classification_head:
A : int = xmod.model.classification_heads["""mnli"""].dense.weight
A : Optional[Any] = xmod.model.classification_heads["""mnli"""].dense.bias
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
A : Any = xmod.model.encoder.lm_head.dense.weight
A : Tuple = xmod.model.encoder.lm_head.dense.bias
A : Any = xmod.model.encoder.lm_head.layer_norm.weight
A : List[str] = xmod.model.encoder.lm_head.layer_norm.bias
A : Union[str, Any] = xmod.model.encoder.lm_head.weight
A : Tuple = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
A : Optional[int] = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowerCAmelCase )
A : List[str] = model(_lowerCAmelCase )[0]
if classification_head:
A : Dict = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowerCAmelCase ) )
else:
A : Optional[Any] = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
A : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
A : Optional[Any] = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
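# The conversion above follows the standard recipe: copy every tensor into the
# new architecture, run both models on the same input, and require the outputs
# to match within a small tolerance. The same recipe in miniature, with plain
# torch modules (a hypothetical demo, not tied to X-MOD):
def convert_and_verify_demo():
    """Copy weights between two identically shaped modules and verify outputs."""
    src = torch.nn.Linear(4, 4)
    dst = torch.nn.Linear(4, 4)
    dst.weight.data = src.weight.data.clone()  # "Now let's copy all the weights"
    dst.bias.data = src.bias.data.clone()
    x = torch.randn(1, 4)
    max_absolute_diff = torch.max(torch.abs(src(x) - dst(x))).item()
    if not torch.allclose(src(x), dst(x), atol=1e-6):
        raise Exception(f"conversion mismatch: max_absolute_diff = {max_absolute_diff}")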
| 662 | 1 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
SCREAMING_SNAKE_CASE_:Tuple = """true"""
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=82 , _lowerCAmelCase=16 ) -> List[str]:
"""simple docstring"""
set_seed(42 )
A : Any = RegressionModel()
A : Tuple = deepcopy(_lowerCAmelCase )
A : Any = RegressionDataset(length=_lowerCAmelCase )
A : str = DataLoader(_lowerCAmelCase , batch_size=_lowerCAmelCase )
model.to(accelerator.device )
A , A : str = accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase )
return model, ddp_model, dataloader
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False ) -> List[str]:
"""simple docstring"""
A : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
A : Optional[int] = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
def tokenize_function(_lowerCAmelCase ):
A : int = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
with accelerator.main_process_first():
A : int = dataset.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
A : str = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_lowerCAmelCase ):
if use_longest:
return tokenizer.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""pt""" )
return tokenizer.pad(_lowerCAmelCase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return DataLoader(_lowerCAmelCase , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=16 )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
"""simple docstring"""
A : Tuple = Accelerator(dispatch_batches=_lowerCAmelCase , split_batches=_lowerCAmelCase )
A : Union[str, Any] = get_dataloader(_lowerCAmelCase , not dispatch_batches )
A : Any = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=_lowerCAmelCase )
A , A : Any = accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
A : Any = []
for batch in dataloader:
A , A : Optional[Any] = batch.values()
with torch.no_grad():
A : str = model(_lowerCAmelCase )
A , A : Optional[Any] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
A , A : Union[str, Any] = [], []
for logit, targ in logits_and_targets:
logits.append(_lowerCAmelCase )
targs.append(_lowerCAmelCase )
A , A : Union[str, Any] = torch.cat(_lowerCAmelCase ), torch.cat(_lowerCAmelCase )
return logits, targs
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=82 , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=16 ) -> Tuple:
"""simple docstring"""
A , A , A : Dict = get_basic_setup(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A , A : Any = generate_predictions(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
assert (
len(_lowerCAmelCase ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_lowerCAmelCase )}'''
def __UpperCamelCase ( _lowerCAmelCase = False , _lowerCAmelCase = False ) -> List[str]:
"""simple docstring"""
A : Any = evaluate.load("""glue""" , """mrpc""" )
A , A : List[str] = get_mrpc_setup(_lowerCAmelCase , _lowerCAmelCase )
# First do baseline
A , A , A : List[Any] = setup["""no"""]
model.to(_lowerCAmelCase )
model.eval()
for batch in dataloader:
batch.to(_lowerCAmelCase )
with torch.inference_mode():
A : List[str] = model(**_lowerCAmelCase )
A : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_lowerCAmelCase , references=batch["""labels"""] )
A : int = metric.compute()
# Then do distributed
A , A , A : List[Any] = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
A : Optional[int] = model(**_lowerCAmelCase )
A : Optional[int] = outputs.logits.argmax(dim=-1 )
A : Optional[Any] = batch["""labels"""]
A , A : int = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_lowerCAmelCase , references=_lowerCAmelCase )
A : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def __UpperCamelCase ( ) -> int:
"""simple docstring"""
A : Optional[int] = Accelerator(split_batches=_lowerCAmelCase , dispatch_batches=_lowerCAmelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(_lowerCAmelCase , _lowerCAmelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
A : Optional[int] = Accelerator(split_batches=_lowerCAmelCase , dispatch_batches=_lowerCAmelCase )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(_lowerCAmelCase , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
A : Optional[int] = Accelerator()
test_torch_metrics(_lowerCAmelCase , 512 )
accelerator.state._reset_state()
def __UpperCamelCase ( _lowerCAmelCase ) -> List[str]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
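# Why `gather_for_metrics` exists: distributed samplers pad the last batch so
# every process sees the same number of samples, and a naive gather would score
# the padded duplicates. A simplified, process-free model of that bookkeeping
# (hypothetical helper, not the accelerate implementation; uses the `math`
# import above):
def gather_for_metrics_demo(dataset_len, num_processes, per_process_batch):
    """Return (samples to keep, padded duplicates to drop) after a full gather."""
    per_round = num_processes * per_process_batch
    rounds = math.ceil(dataset_len / per_round)
    gathered = rounds * per_round  # what a naive gather would yield
    return dataset_len, gathered - dataset_len

# e.g. 99 samples on 2 processes with batch size 16 gather 128 rows,
# 29 of which are padding that must be dropped before computing metrics
assert gather_for_metrics_demo(99, 2, 16) == (99, 29)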
| 662 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Any = tempfile.mkdtemp()
A : List[str] = BlipImageProcessor()
A : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
A : str = BlipProcessor(lowerCamelCase__, lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).tokenizer
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).image_processor
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
A : Any = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
A : int = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Any = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
A : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase__, padding_value=1.0 )
A : Dict = BlipProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : str = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : int = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_image_processor()
A : int = self.get_tokenizer()
A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = """lower newer"""
A : List[Any] = processor(text=lowerCamelCase__ )
A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : Union[str, Any] = self.prepare_image_inputs()
A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Optional[int] = processor.batch_decode(lowerCamelCase__ )
A : Dict = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : int = self.get_tokenizer()
A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : List[str] = self.prepare_image_inputs()
A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 662 | 1 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader ( AbstractDatasetInputStream ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, **lowerCamelCase__, ):
super().__init__(features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, **lowerCamelCase__ )
A : int = Sql(
cache_dir=lowerCamelCase__, features=lowerCamelCase__, sql=lowerCamelCase__, con=lowerCamelCase__, **lowerCamelCase__, )
def _lowerCAmelCase ( self ):
A : Any = None
A : Optional[Any] = None
A : Union[str, Any] = None
A : Optional[Any] = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, )
# Build dataset for splits
A : List[Any] = self.builder.as_dataset(
split="""train""", verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory )
return dataset
class SqlDatasetWriter :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
A : List[Any] = dataset
A : int = name
A : str = con
A : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
A : List[str] = num_proc
A : str = to_sql_kwargs
def _lowerCAmelCase ( self ):
A : Dict = self.to_sql_kwargs.pop("""sql""", lowerCamelCase__ )
A : Dict = self.to_sql_kwargs.pop("""con""", lowerCamelCase__ )
A : Optional[int] = self.to_sql_kwargs.pop("""index""", lowerCamelCase__ )
A : Optional[Any] = self._write(index=lowerCamelCase__, **self.to_sql_kwargs )
return written
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A , A , A : str = args
A : Dict = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
A : Any = query_table(
table=self.dataset.data, key=slice(lowerCamelCase__, offset + self.batch_size ), indices=self.dataset._indices, )
A : str = batch.to_pandas()
A : Any = df.to_sql(self.name, self.con, index=lowerCamelCase__, **lowerCamelCase__ )
return num_rows or len(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, **lowerCamelCase__ ):
A : Dict = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0, len(self.dataset ), self.batch_size ), unit="""ba""", disable=not logging.is_progress_bar_enabled(), desc="""Creating SQL from Arrow format""", ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
A , A : str = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, lowerCamelCase__, lowerCamelCase__ )], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="""ba""", disable=not logging.is_progress_bar_enabled(), desc="""Creating SQL from Arrow format""", ):
written += num_rows
return written
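# The writer above streams Arrow batches into `to_sql` at fixed offsets; the
# same offset-based batching with only the standard library (hypothetical demo
# using sqlite3, not the datasets implementation):
def write_rows_in_batches(rows, batch_size=2, db_path=":memory:"):
    """Create a demo table and insert (id, text) rows in fixed-size batches."""
    import sqlite3
    con = sqlite3.connect(db_path)
    con.execute("CREATE TABLE IF NOT EXISTS demo (id INTEGER, text TEXT)")
    written = 0
    for offset in range(0, len(rows), batch_size):
        batch = rows[offset : offset + batch_size]
        con.executemany("INSERT INTO demo VALUES (?, ?)", batch)
        written += len(batch)
    con.commit()
    con.close()
    return written

assert write_rows_in_batches([(1, "a"), (2, "b"), (3, "c")]) == 3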
| 662 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
return f'''gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase__ ) for s in shape] )}.npy'''
def _lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 4, 64, 64), lowerCamelCase__=False ):
A : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return image
def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__="CompVis/stable-diffusion-v1-4" ):
A : str = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = """bf16""" if fpaa else None
A , A : str = FlaxUNetaDConditionModel.from_pretrained(
lowerCamelCase__, subfolder="""unet""", dtype=lowerCamelCase__, revision=lowerCamelCase__ )
return model, params
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 77, 768), lowerCamelCase__=False ):
A : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
A : List[str] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : List[str] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=lowerCamelCase__ )
A : str = self.get_latents(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : int = self.get_encoder_hidden_states(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : Optional[Any] = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : Dict = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : Tuple = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=lowerCamelCase__ )
A : int = self.get_latents(lowerCamelCase__, shape=(4, 4, 96, 96), fpaa=lowerCamelCase__ )
A : Union[str, Any] = self.get_encoder_hidden_states(lowerCamelCase__, shape=(4, 77, 1024), fpaa=lowerCamelCase__ )
A : Dict = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : List[Any] = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 662 |
from typing import Any
import numpy as np
def __UpperCamelCase ( _lowerCAmelCase ) -> bool:
"""simple docstring"""
return np.array_equal(_lowerCAmelCase , matrix.conjugate().T )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Any = v.conjugate().T
A : List[Any] = v_star.dot(_lowerCAmelCase )
assert isinstance(_lowerCAmelCase , np.ndarray )
return (v_star_dot.dot(_lowerCAmelCase )) / (v_star.dot(_lowerCAmelCase ))
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
A : Any = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
A : str = np.array([[1], [2], [3]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) )
A : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 662 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition ( table: np.ndarray ) -> tuple[np.ndarray, np.ndarray]:
    """Perform a Doolittle LU decomposition of a square matrix (no pivoting)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        # entries of L strictly below the diagonal
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # entries of U on and above the diagonal
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
    import doctest
    doctest.testmod()
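# Once L and U are in hand, solving table @ x = b costs only two triangular
# sweeps. A companion sketch (forward then back substitution), added for
# illustration under the same no-pivoting assumption as the decomposition above:
def lu_solve(table: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Solve table @ x = b using the factors from lower_upper_decomposition."""
    lower, upper = lower_upper_decomposition(table)
    n = len(b)
    y = np.zeros(n)
    for i in range(n):  # forward substitution: L y = b (L has unit diagonal)
        y[i] = b[i] - sum(lower[i][k] * y[k] for k in range(i))
    x = np.zeros(n)
    for i in reversed(range(n)):  # back substitution: U x = y
        x[i] = (y[i] - sum(upper[i][k] * x[k] for k in range(i + 1, n))) / upper[i][i]
    return x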
| 662 | 1 |
from statistics import mean
import numpy as np
def calculate_turn_around_time (
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turnaround time of each process under HRRN scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Marks each process: 0 while still pending, 1 once it has finished.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        # Pick the first unfinished process and fast-forward if the CPU is idle.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                # response ratio = (waiting time + burst time) / burst time
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time (
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the waiting time of each process: turnaround time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
| 662 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image, output_size, keep_aspect_ratio, multiple
) -> Tuple[int, int]:
    """Compute the (height, width) to resize to, optionally keeping the aspect ratio and snapping both sides to a multiple."""
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class DPTImageProcessor ( BaseImageProcessor ):
    """Constructs a DPT image processor."""
    model_input_names = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : int = size if size is not None else {"""height""": 384, """width""": 384}
A : str = get_size_dict(lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Optional[int] = size
A : Union[str, Any] = keep_aspect_ratio
A : int = ensure_multiple_of
A : Dict = resample
A : Optional[Any] = do_rescale
A : Any = rescale_factor
A : str = do_normalize
A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Dict = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
A : Optional[Any] = get_resize_output_image_size(
lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, )
return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A : str = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__ )
A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
A : Tuple = resample if resample is not None else self.resample
A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A : int = do_normalize if do_normalize is not None else self.do_normalize
A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A : Optional[int] = image_std if image_std is not None else self.image_std
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : str = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCamelCase__ ):
A : int = target_sizes.numpy()
A : Union[str, Any] = []
for idx in range(len(lowerCamelCase__ ) ):
A : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ )
A : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
A : List[str] = logits.argmax(dim=1 )
A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
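# A worked number for the resizing rule implemented above (using the repaired
# get_resize_output_image_size helper earlier in this file): with a 480x640
# input and a 384x384 target, the height scale (384/480 = 0.8) deviates less
# from 1 than the width scale (384/640 = 0.6), so both sides are scaled by 0.8
# and then snapped to multiples of 32, giving (384, 512).
_demo_size = get_resize_output_image_size(np.zeros((3, 480, 640)), (384, 384), keep_aspect_ratio=True, multiple=32)
assert _demo_size == (384, 512)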
| 662 | 1 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings ( monkeypatch ) -> None:
    """Reset the record of already-emitted deprecation warnings before each test."""
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def mock_hfh ( monkeypatch ) -> None:
    """Patch the hub client so listing metrics does not hit the network."""
    class MetricMock:
        def __init__( self, metric_id ):
            self.id = metric_id
    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
        def list_metrics( self ):
            return self._metrics
    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"""func, args""" , [(load_metric, ("""metrics/mse""",)), (list_metrics, ()), (inspect_metric, ("""metrics/mse""", """tmp_path"""))] )
def test_metric_deprecation_warning ( func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ) -> None:
    """Each metric entry point should warn that metrics moved to the `evaluate` library."""
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match="https://huggingface.co/docs/evaluate" ):
        func(*args )
| 662 |
class SubArray:
    def __init__( self, arr ):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")
    def solve_sub_array( self ):
        # Kadane-style DP: best sum ending at i, and best sum seen so far
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array) ):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i]) )
            rear[i] = max(sum_value[i], rear[i - 1] )
        return rear[len(self.array) - 1]
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the result is:", re))
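# Editorial aside: solve_sub_array is Kadane's maximum-subarray algorithm with
# two O(n) tables; the same answer needs only two running scalars. Equivalent
# O(1)-space sketch:
def max_sub_array(values):
    """Return the maximum sum over all contiguous, non-empty subarrays."""
    best_ending_here = best_overall = int(values[0])
    for v in values[1:]:
        best_ending_here = max(int(v) + best_ending_here, int(v))
        best_overall = max(best_overall, best_ending_here)
    return best_overall

assert max_sub_array("2,-8,3,-2,4,-10".split(",")) == 5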
| 662 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
),
},
"""spm_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
)
},
}
MAX_MODEL_INPUT_SIZES = {
"""facebook/s2t-small-librispeech-asr""": 1_024,
}
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__="<s>", lowerCamelCase__="</s>", lowerCamelCase__="<pad>", lowerCamelCase__="<unk>", lowerCamelCase__=False, lowerCamelCase__=False, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase__, eos_token=lowerCamelCase__, unk_token=lowerCamelCase__, pad_token=lowerCamelCase__, do_upper_case=lowerCamelCase__, do_lower_case=lowerCamelCase__, tgt_lang=lowerCamelCase__, lang_codes=lowerCamelCase__, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase__, )
A : int = do_upper_case
A : Tuple = do_lower_case
A : Union[str, Any] = load_json(lowerCamelCase__ )
A : Optional[Any] = {v: k for k, v in self.encoder.items()}
A : Tuple = spm_file
A : Optional[int] = load_spm(lowerCamelCase__, self.sp_model_kwargs )
if lang_codes is not None:
A : str = lang_codes
A : Optional[int] = LANGUAGES[lang_codes]
A : List[Any] = [f'''<lang:{lang}>''' for lang in self.langs]
A : Optional[Any] = {lang: self.sp_model.PieceToId(f'''<lang:{lang}>''' ) for lang in self.langs}
A : Optional[Any] = self.lang_tokens
A : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
A : Tuple = {}
@property
def _lowerCAmelCase ( self ):
return len(self.encoder )
@property
def _lowerCAmelCase ( self ):
return self._tgt_lang
@tgt_lang.setter
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : int = new_tgt_lang
self.set_tgt_lang_special_tokens(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Tuple = self.lang_code_to_id[tgt_lang]
A : Dict = [lang_code_id]
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return self.sp_model.encode(lowerCamelCase__, out_type=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return self.encoder.get(lowerCamelCase__, self.encoder[self.unk_token] )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return self.decoder.get(lowerCamelCase__, self.unk_token )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Any = []
A : List[Any] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
A : Any = self.sp_model.decode(lowerCamelCase__ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
A : int = []
else:
current_sub_tokens.append(lowerCamelCase__ )
A : Union[str, Any] = self.sp_model.decode(lowerCamelCase__ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__, token_ids_a=lowerCamelCase__, already_has_special_tokens=lowerCamelCase__ )
A : Tuple = [1] * len(self.prefix_tokens )
A : int = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCamelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCamelCase__ )) + ([0] * len(lowerCamelCase__ )) + suffix_ones
def _lowerCAmelCase ( self ):
A : str = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
A : Union[str, Any] = self.__dict__.copy()
A : Tuple = None
return state
def __setstate__( self, lowerCamelCase__ ):
A : Tuple = d
# for backward compatibility
if not hasattr(self, """sp_model_kwargs""" ):
A : Optional[Any] = {}
A : Tuple = load_spm(self.spm_file, self.sp_model_kwargs )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Dict = Path(lowerCamelCase__ )
assert save_dir.is_dir(), f'''{save_directory} should be a directory'''
A : Dict = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
A : Tuple = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder, lowerCamelCase__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file, lowerCamelCase__ )
elif not os.path.isfile(self.spm_file ):
with open(lowerCamelCase__, """wb""" ) as fi:
A : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (str(lowerCamelCase__ ), str(lowerCamelCase__ ))
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> sentencepiece.SentencePieceProcessor:
"""simple docstring"""
A : Union[str, Any] = sentencepiece.SentencePieceProcessor(**_lowerCAmelCase )
spm.Load(str(_lowerCAmelCase ) )
return spm
def __UpperCamelCase ( _lowerCAmelCase ) -> Union[Dict, List]:
"""simple docstring"""
with open(_lowerCAmelCase , """r""" ) as f:
return json.load(_lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> None:
"""simple docstring"""
with open(_lowerCAmelCase , """w""" ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase , indent=2 )
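# --- illustrative sketch (ids below are made up, not real vocabulary entries) ---
# Sequence layout produced by the prefix/EOS logic above: one language-code
# prefix token, the raw token ids, then EOS, with the special-tokens mask
# marking exactly the prefix and suffix positions.
lang_code_id, eos_token_id = 9, 2
token_ids = [17, 23, 41]
input_ids = [lang_code_id] + token_ids + [eos_token_id]
special_tokens_mask = [1] + [0] * len(token_ids) + [1]
assert input_ids == [9, 17, 23, 41, 2]
assert special_tokens_mask == [1, 0, 0, 0, 1]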
| 662 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[Any] = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = "bit"
__lowerCamelCase : Union[str, Any] = ["preactivation", "bottleneck"]
__lowerCamelCase : Union[str, Any] = ["SAME", "VALID"]
def __init__( self, lowerCamelCase__=3, lowerCamelCase__=64, lowerCamelCase__=[256, 512, 1024, 2048], lowerCamelCase__=[3, 4, 6, 3], lowerCamelCase__="preactivation", lowerCamelCase__="relu", lowerCamelCase__=None, lowerCamelCase__=32, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=1, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A : List[Any] = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
A : Dict = num_channels
A : List[Any] = embedding_size
A : Optional[Any] = hidden_sizes
A : str = depths
A : str = layer_type
A : Union[str, Any] = hidden_act
A : Any = global_padding
A : Optional[int] = num_groups
A : Dict = drop_path_rate
A : List[Any] = embedding_dynamic_padding
A : List[Any] = output_stride
A : Union[str, Any] = width_factor
A : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )]
A , A : Any = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
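# --- illustrative check of the stage naming scheme above ---
# For the default depths=[3, 4, 6, 3], the config exposes a stem plus one
# named stage per depth entry.
depths = [3, 4, 6, 3]
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
assert stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]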
| 662 | 1 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Any = ["torch", "transformers", "onnx"]
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(self, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = ["torch", "transformers", "onnx"]
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(self, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Dict = ["torch", "transformers", "onnx"]
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(self, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = ["torch", "transformers", "onnx"]
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(self, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : str = ["torch", "transformers", "onnx"]
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(self, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Dict = ["torch", "transformers", "onnx"]
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(self, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
| 662 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=50, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=None, ):
A : List[str] = parent
A : List[str] = batch_size
A : Optional[int] = seq_length
A : Optional[int] = is_training
A : Tuple = use_input_mask
A : Optional[Any] = vocab_size
A : str = hidden_size
A : Any = num_hidden_layers
A : List[Any] = num_attention_heads
A : Optional[int] = intermediate_size
A : int = hidden_act
A : Dict = hidden_dropout_prob
A : Optional[Any] = attention_probs_dropout_prob
A : List[Any] = max_position_embeddings
A : int = initializer_range
A : Tuple = use_labels
A : List[str] = scope
def _lowerCAmelCase ( self ):
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : int = None
if self.use_input_mask:
A : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : List[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCAmelCase ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, )
def _lowerCAmelCase ( self ):
        A , A , A , A : List[Any] = self.prepare_config_and_inputs()
A : Any = True
A : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : str = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )
A : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : List[str] = True
A : Union[str, Any] = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, )
A : Optional[Any] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : Union[str, Any] = True
A : Optional[int] = True
A : Optional[int] = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
# first forward pass
A : int = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__, )
A : List[str] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend input_ids with them
A : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size )
A : int = ids_tensor((self.batch_size, 3), vocab_size=2 )
        # append the new tokens to input_ids and the new mask to the attention mask
A : List[str] = torch.cat([input_ids, next_tokens], dim=-1 )
A : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1 )
A : List[str] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
# select random slice
A : Any = ids_tensor((1,), output_from_past.shape[-1] ).item()
A : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
A : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, ):
A : Optional[int] = BertGenerationDecoder(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : List[str] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self ):
A , A , A , A : str = self.prepare_config_and_inputs()
A : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__lowerCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self ):
A : Any = BertGenerationEncoderTester(self )
A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A , A , A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
A : Any = """bert"""
self.model_tester.create_and_check_model(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
# This regression test was failing with PyTorch < 1.3
        A , A , A , A , A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
A : int = None
self.model_tester.create_and_check_model_as_decoder(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, )
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
A : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(lowerCamelCase__ )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : Optional[int] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Union[str, Any] = model(lowerCamelCase__ )[0]
A : List[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Tuple = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Dict = model(lowerCamelCase__ )[0]
A : List[str] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Optional[Any] = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
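# --- illustrative sketch of the cache-equivalence assertion idiom used above ---
# Only the newly generated positions are compared, at one random hidden index,
# which keeps the check cheap while still catching cache/no-cache divergence.
example_no_past = torch.randn(2, 8, 16)
example_with_past = example_no_past[:, -3:, :].clone()  # stands in for the cached run
random_slice_idx = torch.randint(example_with_past.shape[-1], (1,)).item()
no_past_slice = example_no_past[:, -3:, random_slice_idx]
past_slice = example_with_past[:, :, random_slice_idx]
assert past_slice.shape[1] == 3
assert torch.allclose(no_past_slice, past_slice, atol=1e-3)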
| 662 | 1 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
A : Optional[Any] = tau * frequency / samplerate
A : str = sin(_lowerCAmelCase )
A : Optional[Any] = cos(_lowerCAmelCase )
A : Dict = _sin / (2 * q_factor)
A : Tuple = (1 - _cos) / 2
A : List[str] = 1 - _cos
A : int = 1 + alpha
A : List[str] = -2 * _cos
A : List[str] = 1 - alpha
A : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
A : Union[str, Any] = tau * frequency / samplerate
A : Optional[Any] = sin(_lowerCAmelCase )
A : Optional[Any] = cos(_lowerCAmelCase )
A : Tuple = _sin / (2 * q_factor)
A : List[Any] = (1 + _cos) / 2
A : Tuple = -1 - _cos
A : List[str] = 1 + alpha
A : Optional[int] = -2 * _cos
A : List[str] = 1 - alpha
A : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
A : int = tau * frequency / samplerate
A : Dict = sin(_lowerCAmelCase )
A : Any = cos(_lowerCAmelCase )
A : Optional[int] = _sin / (2 * q_factor)
A : Union[str, Any] = _sin / 2
A : str = 0
A : Optional[Any] = -ba
A : int = 1 + alpha
A : List[str] = -2 * _cos
A : List[Any] = 1 - alpha
A : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
A : Dict = tau * frequency / samplerate
A : Any = sin(_lowerCAmelCase )
A : Tuple = cos(_lowerCAmelCase )
A : str = _sin / (2 * q_factor)
A : Tuple = 1 - alpha
A : List[Any] = -2 * _cos
A : Union[str, Any] = 1 + alpha
A : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) , ) -> IIRFilter:
"""simple docstring"""
A : Union[str, Any] = tau * frequency / samplerate
A : Optional[int] = sin(_lowerCAmelCase )
A : int = cos(_lowerCAmelCase )
A : int = _sin / (2 * q_factor)
A : Optional[Any] = 10 ** (gain_db / 40)
A : Tuple = 1 + alpha * big_a
A : Tuple = -2 * _cos
A : Optional[Any] = 1 - alpha * big_a
A : Optional[Any] = 1 + alpha / big_a
A : List[Any] = -2 * _cos
A : Union[str, Any] = 1 - alpha / big_a
A : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) , ) -> IIRFilter:
"""simple docstring"""
A : int = tau * frequency / samplerate
A : int = sin(_lowerCAmelCase )
A : Union[str, Any] = cos(_lowerCAmelCase )
A : int = _sin / (2 * q_factor)
A : Optional[int] = 10 ** (gain_db / 40)
A : Tuple = (big_a + 1) - (big_a - 1) * _cos
A : Optional[Any] = (big_a + 1) + (big_a - 1) * _cos
A : List[Any] = (big_a - 1) - (big_a + 1) * _cos
A : Tuple = (big_a - 1) + (big_a + 1) * _cos
A : int = 2 * sqrt(_lowerCAmelCase ) * alpha
A : int = big_a * (pmc + aaa)
A : Any = 2 * big_a * mpc
A : Tuple = big_a * (pmc - aaa)
A : Dict = ppmc + aaa
A : Any = -2 * pmpc
A : List[Any] = ppmc - aaa
A : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) , ) -> IIRFilter:
"""simple docstring"""
A : List[Any] = tau * frequency / samplerate
A : str = sin(_lowerCAmelCase )
A : Optional[int] = cos(_lowerCAmelCase )
A : int = _sin / (2 * q_factor)
A : int = 10 ** (gain_db / 40)
A : List[str] = (big_a + 1) - (big_a - 1) * _cos
A : int = (big_a + 1) + (big_a - 1) * _cos
A : Tuple = (big_a - 1) - (big_a + 1) * _cos
A : List[Any] = (big_a - 1) + (big_a + 1) * _cos
A : str = 2 * sqrt(_lowerCAmelCase ) * alpha
A : Any = big_a * (ppmc + aaa)
A : Tuple = -2 * big_a * pmpc
A : int = big_a * (ppmc - aaa)
A : List[Any] = pmc + aaa
A : Any = 2 * mpc
A : List[Any] = pmc - aaa
A : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
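# --- illustrative usage (assumes audio_filters.iir_filter.IIRFilter exposes a
# per-sample process() method, as in the reference implementation; the cutoff
# and sample rate below are arbitrary) ---
frequency, samplerate, q_factor = 1_000, 48_000, 1 / sqrt(2)
w0 = tau * frequency / samplerate
alpha = sin(w0) / (2 * q_factor)
b1 = 1 - cos(w0)
b0 = b2 = b1 / 2
a0, a1, a2 = 1 + alpha, -2 * cos(w0), 1 - alpha  # same low-pass biquad as the first factory above

filt = IIRFilter(2)
filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
impulse_response = [filt.process(x) for x in [1.0] + [0.0] * 7]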
| 662 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : str = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384}
A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Dict = size
# Default value set here for backwards compatibility where the value in config is None
A : Dict = crop_pct if crop_pct is not None else 224 / 256
A : Optional[int] = resample
A : List[str] = do_rescale
A : Tuple = rescale_factor
A : Optional[int] = do_normalize
A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
A : List[str] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A : int = int(shortest_edge / crop_pct )
A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Dict = do_resize if do_resize is not None else self.do_resize
A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
A : str = resample if resample is not None else self.resample
A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Dict = do_normalize if do_normalize is not None else self.do_normalize
A : List[str] = image_mean if image_mean is not None else self.image_mean
A : Optional[Any] = image_std if image_std is not None else self.image_std
A : Optional[Any] = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
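# --- worked example of the resize rule above ---
# With shortest_edge=224 and the default crop_pct of 224/256, the image is first
# resized so its short side is int(224 / (224 / 256)) = 256, then center-cropped
# back to 224x224; at >= 384 it is warped directly with no crop.
shortest_edge = 224
crop_pct = 224 / 256
assert int(shortest_edge / crop_pct) == 256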
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE_:List[str] = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:int = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:List[Any] = ["""CLIPFeatureExtractor"""]
SCREAMING_SNAKE_CASE_:Any = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:List[Any] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Any = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Tuple = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
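# --- illustrative sketch of the optional-dependency guard used above ---
# Each backend is probed once; when it is missing, the matching symbols are
# simply left out of the lazily imported structure.
import importlib.util


def _backend_available(package_name: str) -> bool:
    return importlib.util.find_spec(package_name) is not None


tokenizer_symbols = ["CLIPTokenizerFast"] if _backend_available("tokenizers") else []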
| 662 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
A : Dict = """backbone.""" if is_semantic else """"""
A : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
A : Dict = """backbone.""" if is_semantic else """"""
# queries, keys and values
A : Union[str, Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
A : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
A : Optional[int] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
A : int = in_proj_weight[
: config.hidden_size, :
]
A : Any = q_bias
A : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A : Tuple = in_proj_weight[
-config.hidden_size :, :
]
A : Union[str, Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A : str = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
A : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
A : Dict = gamma_a
A : Dict = gamma_a
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A : List[str] = dct.pop(_lowerCAmelCase )
A : Optional[Any] = val
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
A : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> str:
"""simple docstring"""
A : Dict = False if """rvlcdip""" in checkpoint_url else True
A : Union[str, Any] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A : Dict = 1024
A : List[Any] = 4096
A : int = 24
A : int = 16
# labels
if "rvlcdip" in checkpoint_url:
A : List[Any] = 16
A : List[Any] = """huggingface/label-files"""
A : int = """rvlcdip-id2label.json"""
A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
A : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
A : int = idalabel
A : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
A : str = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase )
# load HuggingFace model
A : Any = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase )
model.eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
A : Any = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase )
A : int = prepare_img()
A : Tuple = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
A : str = encoding["""pixel_values"""]
A : Tuple = model(_lowerCAmelCase )
A : Optional[int] = outputs.logits
# verify logits
A : Tuple = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
if has_lm_head:
A : Any = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
A : List[Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
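# --- illustrative sketch of the fused-QKV split done in read_in_q_k_v above ---
# The (3*hidden, hidden) in_proj weight is sliced into query / key / value blocks.
hidden_size = 4
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
    3 * hidden_size, hidden_size
)
query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : hidden_size * 2, :]
value_w = in_proj_weight[-hidden_size:, :]
assert query_w.shape == key_w.shape == value_w.shape == (hidden_size, hidden_size)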
| 662 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
SCREAMING_SNAKE_CASE_:Tuple = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
SCREAMING_SNAKE_CASE_:Union[str, Any] = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode("""utf-8""").split()
SCREAMING_SNAKE_CASE_:int = """|""".join(sys.argv[1:])
SCREAMING_SNAKE_CASE_:Union[str, Any] = re.compile(RF"""^({joined_dirs}).*?\.py$""")
SCREAMING_SNAKE_CASE_:str = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 662 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""", lowerCamelCase__, )
super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
| 662 | 1 |
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> bool:
"""simple docstring"""
A : Optional[Any] = len(_lowerCAmelCase )
A : Optional[Any] = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
A : List[str] = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
A : int = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
A : str = subset[i - 1][j]
if arr[i - 1] <= j:
A : str = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(
lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, )
A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths}
A : str = Text(
cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, )
def _lowerCAmelCase ( self ):
# Build iterable dataset
if self.streaming:
A : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A : List[str] = None
A : Dict = None
A : Tuple = None
A : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, )
A : List[str] = self.builder.as_dataset(
split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory )
return dataset
| 662 | 1 |
import itertools
import math
def __UpperCamelCase ( _lowerCAmelCase ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A : Optional[int] = 2
while True:
if is_prime(_lowerCAmelCase ):
yield num
num += 1
def __UpperCamelCase ( _lowerCAmelCase = 1_0001 ) -> int:
"""simple docstring"""
return next(itertools.islice(prime_generator() , nth - 1 , _lowerCAmelCase ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
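# --- sanity check, assuming the helpers above are bound to the descriptive
# names their own bodies reference (is_prime, prime_generator) ---
assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert list(itertools.islice(prime_generator(), 6)) == [2, 3, 5, 7, 11, 13]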
| 662 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 662 | 1 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=16, lowerCamelCase__=2, lowerCamelCase__=0.02, lowerCamelCase__=3, lowerCamelCase__=4, lowerCamelCase__=None, ):
A : Optional[Any] = parent
A : Tuple = batch_size
A : str = seq_length
A : int = is_training
A : int = use_input_mask
A : List[str] = use_token_type_ids
A : str = use_labels
A : Dict = vocab_size
A : List[Any] = hidden_size
A : Dict = num_hidden_layers
A : Optional[int] = num_attention_heads
A : str = intermediate_size
A : Any = hidden_act
A : str = hidden_dropout_prob
A : Tuple = attention_probs_dropout_prob
A : List[str] = max_position_embeddings
A : Union[str, Any] = type_vocab_size
A : List[Any] = type_sequence_label_size
A : Tuple = initializer_range
A : Union[str, Any] = num_labels
A : Tuple = num_choices
A : Tuple = scope
def _lowerCAmelCase ( self ):
A : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : Any = None
if self.use_input_mask:
A : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
A : int = None
if self.use_token_type_ids:
A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
A : Optional[int] = None
A : List[Any] = None
A : List[Any] = None
if self.use_labels:
A : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
A : List[str] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
A : Union[str, Any] = ids_tensor([self.batch_size], self.num_choices )
A : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self ):
return LlamaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : str = LlamaModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Optional[Any] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )
A : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
A : Optional[Any] = True
A : Any = LlamaModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Tuple = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, )
A : Union[str, Any] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, )
A : Tuple = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
A : str = LlamaForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Any = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
A : Tuple = True
A : Any = True
A : Dict = LlamaForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# first forward pass
A : List[Any] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__, )
A : Union[str, Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend input_ids with them
A : Tuple = ids_tensor((self.batch_size, 3), config.vocab_size )
A : List[str] = ids_tensor((self.batch_size, 3), vocab_size=2 )
        # append the new tokens to input_ids and the new mask to the attention mask
A : List[Any] = torch.cat([input_ids, next_tokens], dim=-1 )
A : str = torch.cat([input_mask, next_mask], dim=-1 )
A : Dict = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
A : List[Any] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
# select random slice
A : str = ids_tensor((1,), output_from_past.shape[-1] ).item()
A : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
A : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) )
def _lowerCAmelCase ( self ):
A : List[Any] = self.prepare_config_and_inputs()
        A , A , A , A , A , A , A : Tuple = config_and_inputs
A : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__lowerCamelCase : List[str] = (LlamaForCausalLM,) if is_torch_available() else ()
__lowerCamelCase : Dict = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Union[str, Any] = False
def _lowerCAmelCase ( self ):
A : Union[str, Any] = LlamaModelTester(self )
A : List[Any] = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A : List[str] = type
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A : int = 3
A : Tuple = input_dict["""input_ids"""]
A : Tuple = input_ids.ne(1 ).to(lowerCamelCase__ )
A : List[Any] = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
A : List[str] = LlamaForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : str = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self ):
A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A : List[str] = 3
A : str = """single_label_classification"""
A : Dict = input_dict["""input_ids"""]
A : Tuple = input_ids.ne(1 ).to(lowerCamelCase__ )
A : Tuple = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
A : Optional[int] = LlamaForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : List[Any] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self ):
A , A : str = self.model_tester.prepare_config_and_inputs_for_common()
A : Union[str, Any] = 3
A : Optional[int] = """multi_label_classification"""
A : List[str] = input_dict["""input_ids"""]
A : List[str] = input_ids.ne(1 ).to(lowerCamelCase__ )
A : Tuple = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float )
A : Tuple = LlamaForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : List[Any] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _lowerCAmelCase ( self ):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
A : Any = ids_tensor([1, 10], config.vocab_size )
A : Optional[int] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A : Dict = LlamaModel(lowerCamelCase__ )
original_model.to(lowerCamelCase__ )
original_model.eval()
A : Dict = original_model(lowerCamelCase__ ).last_hidden_state
A : Optional[Any] = original_model(lowerCamelCase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A : Union[str, Any] = {"""type""": scaling_type, """factor""": 10.0}
A : Union[str, Any] = LlamaModel(lowerCamelCase__ )
scaled_model.to(lowerCamelCase__ )
scaled_model.eval()
A : Dict = scaled_model(lowerCamelCase__ ).last_hidden_state
A : List[str] = scaled_model(lowerCamelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-5 ) )
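# A hedged configuration sketch (parameter names taken from the scaling dict
# built in the test above; treat them as assumptions about this transformers
# version):
#
#     from transformers import LlamaConfig
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})
#     config = LlamaConfig(rope_scaling={"type": "dynamic", "factor": 10.0})
#
# Linear scaling rescales position ids at every sequence length, while dynamic
# NTK scaling only kicks in past the original maximum sequence length, which
# is why the short-input outputs are asserted equal in the "dynamic" branch.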
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _lowerCAmelCase ( self ):
A : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""", device_map="""auto""" )
A : List[Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ), lowerCamelCase__, atol=1e-2, rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30], lowerCamelCase__, atol=1e-5, rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _lowerCAmelCase ( self ):
A : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A : int = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""", device_map="""auto""" )
A : str = model(torch.tensor(lowerCamelCase__ ) )
# Expected mean on dim = -1
A : Any = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ), lowerCamelCase__, atol=1e-2, rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A : Optional[int] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30], lowerCamelCase__, atol=1e-5, rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _lowerCAmelCase ( self ):
A : Tuple = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A : List[Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""", device_map="""auto""" )
A : Any = model(torch.tensor(lowerCamelCase__ ) )
# Expected mean on dim = -1
A : str = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ), lowerCamelCase__, atol=1e-2, rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A : Union[str, Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30], lowerCamelCase__, atol=1e-5, rtol=1e-5 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def _lowerCAmelCase ( self ):
A : Tuple = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""", device_map="""auto""" )
A : List[Any] = model(torch.tensor(lowerCamelCase__ ) )
A : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ), lowerCamelCase__, atol=1e-2, rtol=1e-2 )
# fmt: off
A : Any = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30], lowerCamelCase__, atol=1e-5, rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def _lowerCAmelCase ( self ):
A : Optional[Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A : str = """Simply put, the theory of relativity states that """
A : Union[str, Any] = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A : Tuple = tokenizer.encode(lowerCamelCase__, return_tensors="""pt""" )
A : Optional[int] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""", device_map="""sequential""", use_safetensors=lowerCamelCase__ )
# greedy generation outputs
A : Tuple = model.generate(lowerCamelCase__, max_new_tokens=64, top_p=lowerCamelCase__, temperature=1, do_sample=lowerCamelCase__ )
A : List[Any] = tokenizer.decode(generated_ids[0], skip_special_tokens=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__, lowerCamelCase__ )
| 662 |
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int:
"""simple docstring"""
A , A : str = 1, 1
A : List[Any] = []
for i in range(1 , n + 1 ):
A : Optional[int] = prev_numerator + 2 * prev_denominator
A : Any = prev_numerator + prev_denominator
if len(str(_lowerCAmelCase ) ) > len(str(_lowerCAmelCase ) ):
result.append(_lowerCAmelCase )
A : int = numerator
A : int = denominator
return len(_lowerCAmelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 662 | 1 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__ = "cpu", lowerCamelCase__ = "openai/clip-vit-large-patch14" ):
A : Tuple = device
A : Any = CLIPTokenizerFast.from_pretrained(lowerCamelCase__ )
A : Optional[Any] = [0.4814_5466, 0.457_8275, 0.4082_1073]
A : Union[str, Any] = [0.2686_2954, 0.2613_0258, 0.2757_7711]
A : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std )
A : List[Any] = torchvision.transforms.Resize(224 )
A : Union[str, Any] = torchvision.transforms.CenterCrop(224 )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = self.resize(lowerCamelCase__ )
A : int = self.center_crop(lowerCamelCase__ )
A : List[str] = self.normalize(lowerCamelCase__ )
return images
def __call__( self, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__ ):
A : str = self.tokenizer(text=lowerCamelCase__, **lowerCamelCase__ )
A : Tuple = self.preprocess_img(lowerCamelCase__ )
A : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self, lowerCamelCase__=10, lowerCamelCase__=0.01, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=False, lowerCamelCase__=True, lowerCamelCase__="image", lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=False, lowerCamelCase__=False, ):
super().__init__()
A : int = None
A : Union[str, Any] = device if device else get_device()
if vqgan:
A : Optional[int] = vqgan
else:
A : Optional[Any] = load_vqgan(self.device, conf_path=lowerCamelCase__, ckpt_path=lowerCamelCase__ )
self.vqgan.eval()
if clip:
A : Union[str, Any] = clip
else:
A : Any = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
A : List[str] = ProcessorGradientFlow(device=self.device )
A : Any = iterations
A : Optional[Any] = lr
A : List[Any] = log
A : int = make_grid
A : Tuple = return_val
A : Union[str, Any] = quantize
A : int = self.vqgan.decoder.z_shape
def _lowerCAmelCase ( self, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=5, lowerCamelCase__=True ):
A : Any = []
if output_path is None:
A : Union[str, Any] = """./animation.gif"""
if input_path is None:
A : List[str] = self.save_path
A : List[Any] = sorted(glob(input_path + """/*""" ) )
if not len(lowerCamelCase__ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(lowerCamelCase__ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
A : Optional[int] = total_duration / len(lowerCamelCase__ )
A : Union[str, Any] = [frame_duration] * len(lowerCamelCase__ )
if extend_frames:
A : int = 1.5
A : Optional[int] = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(lowerCamelCase__ ) )
imageio.mimsave(lowerCamelCase__, lowerCamelCase__, duration=lowerCamelCase__ )
print(f'''gif saved to {output_path}''' )
def _lowerCAmelCase ( self, lowerCamelCase__=None, lowerCamelCase__=None ):
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
A : Tuple = preprocess(Image.open(lowerCamelCase__ ), target_image_size=256 ).to(self.device )
A : str = preprocess_vqgan(lowerCamelCase__ )
A , *A : Tuple = self.vqgan.encode(lowerCamelCase__ )
return z
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Dict = self.latent.detach().requires_grad_()
A : str = base_latent + transform_vector
if self.quantize:
A , *A : str = self.vqgan.quantize(lowerCamelCase__ )
else:
A : Optional[Any] = trans_latent
return self.vqgan.decode(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=None ):
A : List[str] = self.clip_preprocessor(text=lowerCamelCase__, images=lowerCamelCase__, return_tensors="""pt""", padding=lowerCamelCase__ )
A : int = self.clip(**lowerCamelCase__ )
A : List[Any] = clip_outputs.logits_per_image
if weights is not None:
A : Any = similarity_logits * weights
return similarity_logits.sum()
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Dict = self._get_clip_similarity(pos_prompts["""prompts"""], lowerCamelCase__, weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
A : Any = self._get_clip_similarity(neg_prompts["""prompts"""], lowerCamelCase__, weights=neg_prompts["""weights"""] )
else:
A : List[str] = torch.tensor([1], device=self.device )
A : Any = -torch.log(lowerCamelCase__ ) + torch.log(lowerCamelCase__ )
return loss
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : List[str] = torch.randn_like(self.latent, requires_grad=lowerCamelCase__, device=self.device )
A : str = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
A : Union[str, Any] = self._add_vector(lowerCamelCase__ )
A : Dict = loop_post_process(lowerCamelCase__ )
A : Any = self._get_CLIP_loss(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
print("""CLIP loss""", lowerCamelCase__ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=lowerCamelCase__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
wandb.init(reinit=lowerCamelCase__, project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
A : Union[str, Any] = Image.open(lowerCamelCase__ )
A : Optional[int] = image.resize((256, 256) )
wandb.log("""Original Image""", wandb.Image(lowerCamelCase__ ) )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if not prompts:
return []
A : Any = []
A : Dict = []
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : str = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(lowerCamelCase__, (tuple, list) ):
A : List[Any] = prompt[0]
A : int = float(prompt[1] )
elif ":" in prompt:
A , A : Optional[int] = prompt.split(""":""" )
A : Dict = float(lowerCamelCase__ )
else:
A : Optional[Any] = prompt
A : Dict = 1.0
processed_prompts.append(lowerCamelCase__ )
weights.append(lowerCamelCase__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowerCamelCase__, device=self.device ),
}
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=None, ):
if image_path:
A : int = self._get_latent(lowerCamelCase__ )
else:
A : Tuple = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
assert pos_prompts, "You must provide at least one positive prompt."
A : Tuple = self.process_prompts(lowerCamelCase__ )
A : Union[str, Any] = self.process_prompts(lowerCamelCase__ )
if save_final and save_path is None:
A : List[str] = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(lowerCamelCase__ ):
os.makedirs(lowerCamelCase__ )
else:
A : str = save_path + """_""" + get_timestamp()
os.makedirs(lowerCamelCase__ )
A : Tuple = save_path
A : List[str] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(lowerCamelCase__ ) )
A : Optional[Any] = loop_post_process(lowerCamelCase__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) ):
if show_intermediate:
show_pil(lowerCamelCase__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(lowerCamelCase__ )} )
if show_final:
show_pil(lowerCamelCase__ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f'''iter_{iter:03d}_final.png''' ) )
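# How prompts are parsed by the prompt-processing method above (traced from
# its split logic): a string such as "a smiling face:1.0|bright lighting:0.5"
# is split on "|" into individual prompts, each "text:weight" pair is split
# on ":", and entries without an explicit weight default to 1.0, producing
# {"prompts": [...], "weights": torch.tensor([...], device=...)} for the
# weighted CLIP-similarity loss.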
| 662 |
import re
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
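# Example behaviour (hand-traced from the translate table above): each base
# is swapped for its Watson-Crick partner, so "ATCG" maps to "TAGC" and
# "GGT" maps to "CCA", while any strand containing a character outside
# {A, T, C, G} raises ValueError("Invalid Strand").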
| 662 | 1 |
from __future__ import annotations
import time
import numpy as np
SCREAMING_SNAKE_CASE_:Union[str, Any] = [8, 5, 9, 7]
SCREAMING_SNAKE_CASE_:Any = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
SCREAMING_SNAKE_CASE_:Optional[int] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
A : str = claim_vector
A : int = allocated_resources_table
A : int = maximum_claim_table
def _lowerCAmelCase ( self ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def _lowerCAmelCase ( self ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def _lowerCAmelCase ( self ):
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(lowerCamelCase__ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def _lowerCAmelCase ( self ):
return {self.__need().index(lowerCamelCase__ ): i for i in self.__need()}
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
A : Optional[Any] = self.__need()
A : int = self.__allocated_resources_table
A : Any = self.__available_resources()
A : int = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
while need_list:
A : Optional[Any] = False
for each_need in need_list:
A : Dict = True
for index, need in enumerate(lowerCamelCase__ ):
if need > available_resources[index]:
A : List[Any] = False
break
if execution:
A : Any = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
A : int = original_need_index
print(f'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(lowerCamelCase__ )
# update available/freed resources stack
A : Any = np.array(lowerCamelCase__ ) + np.array(
alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(lowerCamelCase__ ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def _lowerCAmelCase ( self ):
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
f'''P{self.__allocated_resources_table.index(lowerCamelCase__ ) + 1}'''
+ """ """.join(f'''{it:>8}''' for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
f'''P{self.__maximum_claim_table.index(lowerCamelCase__ ) + 1}'''
+ """ """.join(f'''{it:>8}''' for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(lowerCamelCase__ ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(lowerCamelCase__ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
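# An illustrative need computation (worked out by hand from the module-level
# tables above, not part of the original snippet): each process's need is its
# maximum claim minus its current allocation, e.g. for the first process
# [3, 2, 1, 4] - [2, 0, 1, 1] = [1, 2, 0, 3]. The Banker's algorithm declares
# the state safe when the processes can be ordered so that every need fits in
# the resources freed by the processes scheduled before it.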
| 662 |
from __future__ import annotations
SCREAMING_SNAKE_CASE_:Tuple = """#"""
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self ):
A : dict = {}
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = self._trie
for char in text:
if char not in trie:
A : str = {}
A : str = trie[char]
A : Optional[int] = True
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Dict = self._trie
for char in prefix:
if char in trie:
A : Optional[Any] = trie[char]
else:
return []
return self._elements(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : int = []
for c, v in d.items():
A : List[Any] = [""" """] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )]
result.extend(lowerCamelCase__ )
return tuple(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_:Any = Trie()
SCREAMING_SNAKE_CASE_:Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def __UpperCamelCase ( _lowerCAmelCase ) -> tuple:
"""simple docstring"""
A : List[str] = trie.find_word(_lowerCAmelCase )
return tuple(string + word for word in suffixes )
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
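# Expected behaviour (hand-traced from the words inserted above, not captured
# output): querying the prefix "de" walks to the "de" node and enumerates
# every completion beneath it, so autocomplete_using_trie("de") yields
# "depart ", "detergent ", "deer " and "deal " (each carrying the trailing
# space appended at the end-of-word marker), while an unmatched prefix such
# as "xyz" yields an empty tuple.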
| 662 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self, lowerCamelCase__ ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""], model_result["""ss"""] ):
A : Optional[Any] = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Dict = """sshleifer/tiny-gpt2"""
A : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase__, inference=lowerCamelCase__, sequence_lengths=[8], batch_sizes=[1], eager_mode=lowerCamelCase__, multi_process=lowerCamelCase__, )
A : List[Any] = TensorFlowBenchmark(lowerCamelCase__ )
A : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ):
A : int = """sgugger/tiny-distilbert-classification"""
A : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase__, inference=lowerCamelCase__, sequence_lengths=[8], batch_sizes=[1], multi_process=lowerCamelCase__, only_pretrain_model=lowerCamelCase__, )
A : List[str] = TensorFlowBenchmark(lowerCamelCase__ )
A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ):
A : Dict = """sshleifer/tiny-gpt2"""
A : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase__, inference=lowerCamelCase__, sequence_lengths=[8], batch_sizes=[1], multi_process=lowerCamelCase__, )
A : Optional[int] = TensorFlowBenchmark(lowerCamelCase__ )
A : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ):
A : List[Any] = """sshleifer/tiny-gpt2"""
A : Tuple = AutoConfig.from_pretrained(lowerCamelCase__ )
A : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase__, inference=lowerCamelCase__, sequence_lengths=[8], batch_sizes=[1], eager_mode=lowerCamelCase__, multi_process=lowerCamelCase__, )
A : List[Any] = TensorFlowBenchmark(lowerCamelCase__, [config] )
A : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ):
A : List[str] = """sshleifer/tiny-gpt2"""
A : Optional[Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
A : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase__, inference=lowerCamelCase__, sequence_lengths=[8], batch_sizes=[1], multi_process=lowerCamelCase__, )
A : List[Any] = TensorFlowBenchmark(lowerCamelCase__, [config] )
A : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ):
A : Any = """sshleifer/tiny-gpt2"""
A : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase__, inference=lowerCamelCase__, sequence_lengths=[8], batch_sizes=[1], multi_process=lowerCamelCase__, )
A : List[str] = TensorFlowBenchmark(lowerCamelCase__ )
A : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ):
A : Union[str, Any] = """sshleifer/tiny-gpt2"""
A : Optional[Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
A : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase__, inference=lowerCamelCase__, sequence_lengths=[8], batch_sizes=[1], multi_process=lowerCamelCase__, )
A : Dict = TensorFlowBenchmark(lowerCamelCase__, [config] )
A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ):
A : Optional[int] = """patrickvonplaten/t5-tiny-random"""
A : Dict = AutoConfig.from_pretrained(lowerCamelCase__ )
A : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase__, inference=lowerCamelCase__, sequence_lengths=[8], batch_sizes=[1], multi_process=lowerCamelCase__, )
A : List[str] = TensorFlowBenchmark(lowerCamelCase__, configs=[config] )
A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0, """Cannot do xla on CPU.""" )
def _lowerCAmelCase ( self ):
A : Tuple = """sshleifer/tiny-gpt2"""
A : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase__, inference=lowerCamelCase__, sequence_lengths=[8], batch_sizes=[1], use_xla=lowerCamelCase__, multi_process=lowerCamelCase__, )
A : Tuple = TensorFlowBenchmark(lowerCamelCase__ )
A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ):
A : List[Any] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
A : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID], inference=lowerCamelCase__, save_to_csv=lowerCamelCase__, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(lowerCamelCase__, """inf_time.csv""" ), inference_memory_csv_file=os.path.join(lowerCamelCase__, """inf_mem.csv""" ), env_info_csv_file=os.path.join(lowerCamelCase__, """env.csv""" ), multi_process=lowerCamelCase__, )
A : List[str] = TensorFlowBenchmark(lowerCamelCase__ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowerCamelCase__, """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase__, """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase__, """env.csv""" ) ).exists() )
def _lowerCAmelCase ( self ):
A : Any = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(lowerCamelCase__ ):
self.assertTrue(hasattr(lowerCamelCase__, """sequential""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """cumulative""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """current""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID], inference=lowerCamelCase__, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(lowerCamelCase__, """log.txt""" ), log_print=lowerCamelCase__, trace_memory_line_by_line=lowerCamelCase__, eager_mode=lowerCamelCase__, multi_process=lowerCamelCase__, )
A : str = TensorFlowBenchmark(lowerCamelCase__ )
A : List[str] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(lowerCamelCase__, """log.txt""" ) ).exists() )
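# A standalone sketch of the same benchmark API exercised above (keyword
# arguments mirror those used in the test methods; exact defaults are
# assumptions):
#
#     args = TensorFlowBenchmarkArguments(
#         models=["sshleifer/tiny-gpt2"],
#         inference=True,
#         sequence_lengths=[8],
#         batch_sizes=[1],
#         multi_process=False,
#     )
#     results = TensorFlowBenchmark(args).run()
#     print(results.time_inference_result)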
| 662 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = bnb_quantization_config.load_in_abit
A : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
A : Any = []
# custom device map
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1:
A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A : int = get_keys_to_not_convert(_lowerCAmelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowerCAmelCase )
A : Optional[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A : Dict = []
A : Tuple = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowerCAmelCase )
# compatibility with peft
A : Union[str, Any] = load_in_abit
A : Tuple = load_in_abit
A : List[str] = get_parameter_device(_lowerCAmelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
# convert param to the right dtype
A : Tuple = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowerCAmelCase ):
param.to(_lowerCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A : str = replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
A : Optional[Any] = get_quantized_model_device_map(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A : Tuple = True
A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[int]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A : Optional[int] = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A : Tuple = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A : Any = {}
A : List[str] = special_dtypes
A : Any = no_split_module_classes
A : Union[str, Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A : Tuple = get_balanced_memory(
_lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , )
A : int = max_memory
A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# check if don't have any quantized module on the cpu
A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A : Optional[int] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
A : Optional[Any] = []
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> int:
"""simple docstring"""
A : Optional[int] = False
for name, module in model.named_children():
if current_key_name is None:
A : int = []
current_key_name.append(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A : Dict = """.""".join(_lowerCAmelCase )
A : Optional[Any] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A : Dict = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A : Optional[Any] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
A : Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A : Any = module.weight.data
if module.bias is not None:
A : Any = module.bias.data
bnb_module.requires_grad_(_lowerCAmelCase )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Dict = True
if len(list(module.children() ) ) > 0:
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __UpperCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
with init_empty_weights():
A : Tuple = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
A : Optional[int] = find_tied_parameters(_lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A : Optional[int] = sum(_lowerCAmelCase , [] )
A : Tuple = len(_lowerCAmelCase ) > 0
# Check if it is a base model
A : List[str] = False
if hasattr(_lowerCAmelCase , """base_model_prefix""" ):
A : Optional[Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A : str = list(model.named_children() )
A : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
A : int = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
A : Optional[Any] = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase )
# remove ".weight" from the keys
A : Union[str, Any] = [""".weight""", """.bias"""]
A : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A : List[str] = name.replace(_lowerCAmelCase , """""" )
filtered_module_names.append(_lowerCAmelCase )
return filtered_module_names
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
for m in model.modules():
if isinstance(_lowerCAmelCase , bnb.nn.Linearabit ):
return True
return False
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
return next(parameter.parameters() ).device
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase )
A : Tuple = param_name
A : Union[str, Any] = model
if "." in tensor_name:
A : int = tensor_name.split(""".""" )
for split in splits[:-1]:
A : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
A : Optional[Any] = new_module
A : List[str] = splits[-1]
# offload weights
A : Optional[int] = False
offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase , )
else:
offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
offload_weight(_lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase )
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , """meta""" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
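# A hedged end-to-end sketch of the quantization entry point defined first in
# this module (it mirrors accelerate's public load_and_quantize_model
# workflow; the model class and checkpoint path are placeholders):
#
#     with init_empty_weights():
#         model = MyModel(config)                    # hypothetical model
#     bnb_config = BnbQuantizationConfig(load_in_abit=True)
#     model = load_and_quantize_model(
#         model,
#         bnb_config,
#         weights_location="/path/to/checkpoint",    # placeholder path
#         device_map="auto",
#     )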
| 662 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
A : Dict = """backbone.""" if is_semantic else """"""
A : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
A : Dict = """backbone.""" if is_semantic else """"""
# queries, keys and values
A : Union[str, Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
A : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
A : Optional[int] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
A : int = in_proj_weight[
: config.hidden_size, :
]
A : Any = q_bias
A : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A : Tuple = in_proj_weight[
-config.hidden_size :, :
]
A : Union[str, Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A : str = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
A : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
A : Dict = gamma_a
A : Dict = gamma_a
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A : List[str] = dct.pop(_lowerCAmelCase )
A : Optional[Any] = val
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
A : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> str:
"""simple docstring"""
A : Dict = False if """rvlcdip""" in checkpoint_url else True
A : Union[str, Any] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A : Dict = 1024
A : List[Any] = 4096
A : int = 24
A : int = 16
# labels
if "rvlcdip" in checkpoint_url:
A : List[Any] = 16
A : List[Any] = """huggingface/label-files"""
A : int = """rvlcdip-id2label.json"""
A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
A : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
A : int = idalabel
A : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
A : str = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase )
# load HuggingFace model
A : Any = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase )
model.eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
A : Any = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase )
A : int = prepare_img()
A : Tuple = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
A : str = encoding["""pixel_values"""]
A : Tuple = model(_lowerCAmelCase )
A : Optional[int] = outputs.logits
# verify logits
A : Tuple = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
if has_lm_head:
A : Any = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
A : List[Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
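# A tiny hand-run of the rekeying helper above (dict values are placeholders):
# it simply pops the old key out of the state dict and re-inserts its tensor
# under the new name, so
#
#     sd = {"norm.weight": 0}
#     # after renaming "norm.weight" -> "layernorm.weight":
#     # sd == {"layernorm.weight": 0}
#
# and the rename-keys builder just enumerates one such (src, dest) pair per
# parameter of the original DiT checkpoint layout.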
| 662 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
A : Tuple = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
A : Dict = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(_lowerCAmelCase )
DownloadCommand.register_subcommand(_lowerCAmelCase )
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
RunCommand.register_subcommand(_lowerCAmelCase )
ServeCommand.register_subcommand(_lowerCAmelCase )
UserCommands.register_subcommand(_lowerCAmelCase )
AddNewModelCommand.register_subcommand(_lowerCAmelCase )
AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase )
LfsCommands.register_subcommand(_lowerCAmelCase )
PTtoTFCommand.register_subcommand(_lowerCAmelCase )
# Let's go
A : Tuple = parser.parse_args()
if not hasattr(_lowerCAmelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
A : Any = args.func(_lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 662 | 1 |
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list[int]:
"""simple docstring"""
A : Optional[int] = int(_lowerCAmelCase )
# Initialize Result
A : int = []
# Traverse through all denomination
for denomination in reversed(_lowerCAmelCase ):
# Find denominations
while int(_lowerCAmelCase ) >= int(_lowerCAmelCase ):
total_value -= int(_lowerCAmelCase )
answer.append(_lowerCAmelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:List[Any] = []
SCREAMING_SNAKE_CASE_:Dict = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
SCREAMING_SNAKE_CASE_:Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F"""Following is minimal change for {value}: """)
SCREAMING_SNAKE_CASE_:str = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
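# A worked example (computed by hand, not program output): with the default
# Indian denominations above, making change for 93 greedily takes 50, then
# 20, then 20, then 2, then 1, returning [50, 20, 20, 2, 1]. Note the greedy
# strategy is only optimal for canonical coin systems such as this one.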
| 662 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_:int = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Any = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
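# A minimal self-contained sketch of the lazy-import idea behind _LazyModule
# (PEP 562 module __getattr__; the submodule and symbol names below are
# placeholders, not transformers internals):
#
#     import importlib
#
#     _imports = {"tokenization_blenderbot": ["BlenderbotTokenizer"]}
#
#     def __getattr__(name):
#         for submodule, symbols in _imports.items():
#             if name in symbols:
#                 mod = importlib.import_module(f".{submodule}", __name__)
#                 return getattr(mod, name)
#         raise AttributeError(name)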
| 662 | 1 |
SCREAMING_SNAKE_CASE_:str = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 662 |
def find_minimum_change(denominations, value) -> list[int]:
    """Greedy change-making: pick the largest denominations first."""
    total_value = int(value)
    # Initialize result
    answer = []
    # Traverse all denominations, largest first
    for denomination in reversed(denominations):
        # Take as many of this denomination as still fit
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = """0"""
    if (
        input("""Do you want to enter your denominations? (y/n): """).strip().lower()
        == "y"
    ):
        n = int(input("""Enter the number of denominations you want to add: """).strip())
        for i in range(0, n):
            denominations.append(int(input(F"""Denomination {i}: """).strip()))
        value = input("""Enter the change you want to make in Indian Currency: """).strip()
    else:
        # All denominations of Indian Currency if the user does not enter any
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
        value = input("""Enter the change you want to make: """).strip()
    if int(value) <= 0:
        print("""The total value cannot be zero or negative.""")
    else:
        print(F"""Following is minimal change for {value}: """)
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=""" """)
| 662 | 1 |
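For reference, here is a compact, self-contained restatement of the greedy routine above, together with a check that shows its main limitation: greedy change-making is optimal for canonical coin systems such as the INR denominations used here, but not for arbitrary ones.

def greedy_change(denominations: list[int], value: int) -> list[int]:
    answer = []
    for denomination in sorted(denominations, reverse=True):
        while value >= denomination:
            value -= denomination
            answer.append(denomination)
    return answer

assert greedy_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2,
]
# A non-canonical system where greedy is suboptimal:
assert greedy_change([1, 3, 4], 6) == [4, 1, 1]  # the optimum is [3, 3]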
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainer's args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and a second time in a console-friendly format, so it's easier to use when tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("""nan""")
class Tee:
    """A helper class to tee print's output into a file.

    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename ):
        self.stdout = sys.stdout
        self.file = open(filename , """a""" )

    def __getattr__(self, attr ):
        return getattr(self.stdout , attr )

    def write(self, msg ):
        self.stdout.write(msg )
        # strip tqdm codes
        self.file.write(re.sub(R"""^.*\r""" , """""" , msg , 0 , re.M ) )
def get_original_command(max_width=80 , full_python_path=False ):
    """Return the original command line string, wrapped for readability."""
    cmd = []
    # deal with critical env vars
    env_keys = ["""CUDA_VISIBLE_DEVICES"""]
    for key in env_keys:
        val = os.environ.get(key , None )
        if val is not None:
            cmd.append(f'''{key}={val}''' )
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("""/""" )[-1]
    cmd.append(python )
    # now the normal args
    cmd += list(map(shlex.quote , sys.argv ) )
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = """"""
    while len(cmd ) > 0:
        current_line += f'''{cmd.pop(0 )} '''
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = """"""
    return "\\\n".join(lines )
def get_base_command(args , output_dir ):
    # unwrap multi-line input
    args.base_cmd = re.sub(R"""[\\\n]+""" , """ """ , args.base_cmd )
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(R"""--output_dir\s+[^\s]+""" , """""" , args.base_cmd )
    args.base_cmd += f''' --output_dir {output_dir}'''
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(R"""--overwrite_output_dir\s+""" , """""" , args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd )
def process_run_single(id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ):
    # enable the next block to debug everything but the run itself, fast
    if 0:
        import random
        from time import sleep

        sleep(0 )
        return dict(
            {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , )
    result = subprocess.run(cmd , capture_output=True , text=True )
    if verbose:
        print("""STDOUT""" , result.stdout )
        print("""STDERR""" , result.stderr )
    # save the streams
    prefix = variation.replace(""" """ , """-""" )
    with open(Path(output_dir ) / f'''log.{prefix}.stdout.txt''' , """w""" ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / f'''log.{prefix}.stderr.txt''' , """w""" ) as f:
        f.write(result.stderr )
    if result.returncode != 0:
        if verbose:
            print("""failed""" )
        return {target_metric_key: nan}
    with io.open(f'''{output_dir}/all_results.json''' , """r""" , encoding="""utf-8""" ) as f:
        metrics = json.load(f )
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ):
    results = []
    metrics = []
    preamble = f'''{id}: {variation:<{longest_variation_len}}'''
    outcome = f'''{preamble}: '''
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f'''\33[2K\r{outcome}'''
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = f'''{outcome} {mean_target}'''
        if len(results ) > 1:
            results_str += f''' {tuple(round(x , 2 ) for x in results )}'''
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    """Report the software versions and GPU hardware the benchmark ran on."""
    properties = torch.cuda.get_device_properties(torch.device("""cuda""" ) )
    return f'''
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results(results , target_metric_key , report_metric_keys , base_variation , output_dir ):
    df = pd.DataFrame(results )
    variation_key = """variation"""
    diff_key = """diff_%"""
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis="""columns""" , )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis="""columns""" ) # reorder cols
    # capitalize
    df = df.rename(str.capitalize , axis="""columns""" )
    # make the cols as narrow as possible
    df_github = df.rename(lambda c : c.replace("""_""" , """<br>""" ) , axis="""columns""" )
    df_console = df.rename(lambda c : c.replace("""_""" , """\n""" ) , axis="""columns""" )
    report = ["""""", """Copy between the cut-here-lines and paste as is to github or a forum"""]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt=""".2f""" )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt=""".2f""" )]
    print("""\n\n""".join(report ) )
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--base-cmd""" , default=None , type=str , required=True , help="""Base cmd""" , )
    parser.add_argument(
        """--variations""" , default=None , type=str , nargs="""+""" , required=True , help="""Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'""" , )
    parser.add_argument(
        """--base-variation""" , default=None , type=str , help="""Baseline variation to compare to. if None the minimal target value will be used to compare against""" , )
    parser.add_argument(
        """--target-metric-key""" , default=None , type=str , required=True , help="""Target metric key in output_dir/all_results.json, e.g., train_samples_per_second""" , )
    parser.add_argument(
        """--report-metric-keys""" , default="""""" , type=str , help="""Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'""" , )
    parser.add_argument(
        """--repeat-times""" , default=1 , type=int , help="""How many times to re-run each variation - an average will be reported""" , )
    parser.add_argument(
        """--output_dir""" , default="""output_benchmark""" , type=str , help="""The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked""" , )
    parser.add_argument(
        """--verbose""" , default=False , action="""store_true""" , help="""Whether to show the outputs of each run or just the benchmark progress""" , )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )
    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(R"""\|""" , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(""" """.join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = f'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
    print(f'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
    print(f'''and this script\'s output is also piped into {report_fn}''' )
    sys.stdout = Tee(report_fn )
    print(f'''\n*** Running {len(variations )} benchmarks:''' )
    print(f'''Base command: {" ".join(base_cmd )}''' )
    variation_key = """variation"""
    results = []
    for id, variation in enumerate(tqdm(variations , desc="""Total completion: """ , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )
    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main()
| 662 |
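The heart of the benchmark driver above is the expansion of the `--variations` dimensions into a Cartesian product of command-line suffixes. The standalone sketch below reproduces that expansion with the same splitting rules, and prints exactly the six variations enumerated in the header comment:

import itertools

variations = ["--tf32 0|--tf32 1", "|--fp16|--bf16"]  # a 2 x 3 grid
dims = [[opt.strip() for opt in dim.split("|")] for dim in variations]
cmd_suffixes = [" ".join(parts).strip() for parts in itertools.product(*dims)]
for suffix in cmd_suffixes:
    print(suffix)
# --tf32 0
# --tf32 0 --fp16
# --tf32 0 --bf16
# --tf32 1
# --tf32 1 --fp16
# --tf32 1 --bf16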
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare that with taking a full-size model, reducing its layers and
# emb dimensions to the minimum but keeping the full vocab + merges files, which leads to
# ~3MB in total for all files. The latter is done by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE_:Any = dict(zip(vocab, range(len(vocab))))
SCREAMING_SNAKE_CASE_:Dict = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_:List[Any] = Path(tmpdirname)
SCREAMING_SNAKE_CASE_:str = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
SCREAMING_SNAKE_CASE_:Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
SCREAMING_SNAKE_CASE_:Any = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
SCREAMING_SNAKE_CASE_:Optional[Any] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
SCREAMING_SNAKE_CASE_:Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
SCREAMING_SNAKE_CASE_:str = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 662 | 1 |
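Why does shrinking the vocab shrink the files so much more than shrinking the layers? With `d_model=4` the embedding tables dominate the parameter count. The arithmetic below is a rough sketch only (it ignores positional embeddings, layer norms, and biases, and assumes separate src/tgt embedding tables):

src_vocab, tgt_vocab, d_model, ffn_dim = 1000, 1000, 4, 4

embeddings = (src_vocab + tgt_vocab) * d_model   # 8_000 parameters
attention = 4 * d_model * d_model                # q, k, v, out projections: 64
feed_forward = 2 * d_model * ffn_dim             # two projection matrices: 32
encoder_layer = attention + feed_forward         # self-attention only: 96
decoder_layer = 2 * attention + feed_forward     # plus cross-attention: 160
print(embeddings + encoder_layer + decoder_layer)  # 8256 -> embeddings dominate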
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = True, ):
A : Tuple = [file for file in os.listdir(lowerCamelCase__ ) if os.path.isfile(os.path.join(lowerCamelCase__, lowerCamelCase__ ) )]
if identifier is not None:
A : Dict = [file for file in files if identifier in file]
if n_identifier is not None:
            if isinstance(n_identifier, list ):
for n_ in n_identifier:
A : Dict = [file for file in files if n_ not in file]
else:
A : Optional[int] = [file for file in files if n_identifier not in file]
A : Union[str, Any] = ignore_files or []
ignore_files.append("""__init__.py""" )
A : Optional[int] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""", lowerCamelCase__ )
if only_modules:
A : List[Any] = file.split(""".""" )[0]
try:
                    module = getattr(transformers, module_identifier )
                    suite = doctest.DocTestSuite(module )
                    result = unittest.TextTestRunner().run(suite )
self.assertIs(len(result.failures ), 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
                result = doctest.testfile(str("""..""" / directory / file ), optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed, 0 )
def _lowerCAmelCase ( self ):
A : Union[str, Any] = Path("""src/transformers""" )
A : Optional[int] = """modeling"""
A : Optional[int] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(lowerCamelCase__, identifier=lowerCamelCase__, ignore_files=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = Path("""src/transformers""" )
A : Tuple = """tokenization"""
self.analyze_directory(lowerCamelCase__, identifier=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Tuple = Path("""src/transformers""" )
A : Any = """configuration"""
self.analyze_directory(lowerCamelCase__, identifier=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Any = Path("""src/transformers""" )
A : List[Any] = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(lowerCamelCase__, n_identifier=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = Path("""docs/source""" )
A : Tuple = ["""favicon.ico"""]
self.analyze_directory(lowerCamelCase__, ignore_files=lowerCamelCase__, only_modules=lowerCamelCase__ )
| 662 |
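The `only_modules` branch above relies on `doctest.DocTestSuite`, which collects the `>>>` examples from a module's docstrings and runs them through `unittest`. A self-contained sketch of that machinery, using the current module instead of a transformers submodule:

import doctest
import sys
import unittest

def add(a: int, b: int) -> int:
    """
    >>> add(2, 3)
    5
    """
    return a + b

suite = doctest.DocTestSuite(sys.modules[__name__])  # collect this module's doctests
result = unittest.TextTestRunner().run(suite)
assert len(result.failures) == 0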
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:int = """Hello, World!"""
SCREAMING_SNAKE_CASE_:List[Any] = """en_XX"""
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Optional[int] = Path("""data_bin""" )
A : Optional[Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowerCAmelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(_lowerCAmelCase )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
        config.num_labels = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
    print("""Our X-MOD config:""" , config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
A : Any = xmod_sent_encoder.embed_tokens.weight
A : int = xmod_sent_encoder.embed_positions.weight
A : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
A : Dict = xmod_sent_encoder.layernorm_embedding.weight
A : int = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
A : List[str] = xmod_layer.self_attn.q_proj.weight
A : Optional[int] = xmod_layer.self_attn.q_proj.bias
A : List[Any] = xmod_layer.self_attn.k_proj.weight
A : Union[str, Any] = xmod_layer.self_attn.k_proj.bias
A : Optional[int] = xmod_layer.self_attn.v_proj.weight
A : Dict = xmod_layer.self_attn.v_proj.bias
# self-attention output
        self_output = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
A : Optional[Any] = xmod_layer.self_attn.out_proj.weight
A : Dict = xmod_layer.self_attn.out_proj.bias
A : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight
A : str = xmod_layer.self_attn_layer_norm.bias
# intermediate
        intermediate = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
A : Optional[int] = xmod_layer.fca.weight
A : Optional[int] = xmod_layer.fca.bias
# output
        bert_output = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
A : Union[str, Any] = xmod_layer.fca.weight
A : int = xmod_layer.fca.bias
A : List[str] = xmod_layer.final_layer_norm.weight
A : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
A : str = xmod_layer.adapter_layer_norm.weight
A : str = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
A : Optional[Any] = from_adapter.fca.weight
A : Optional[Any] = from_adapter.fca.bias
A : List[str] = from_adapter.fca.weight
A : Any = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A : Dict = xmod_sent_encoder.layer_norm.weight
A : int = xmod_sent_encoder.layer_norm.bias
if classification_head:
A : int = xmod.model.classification_heads["""mnli"""].dense.weight
A : Optional[Any] = xmod.model.classification_heads["""mnli"""].dense.bias
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
A : Any = xmod.model.encoder.lm_head.dense.weight
A : Tuple = xmod.model.encoder.lm_head.dense.bias
A : Any = xmod.model.encoder.lm_head.layer_norm.weight
A : List[str] = xmod.model.encoder.lm_head.layer_norm.bias
A : Union[str, Any] = xmod.model.encoder.lm_head.weight
A : Tuple = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads["""mnli"""](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 662 | 1 |
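The conversion script ends with the standard sanity check for ported weights: compute the worst elementwise deviation between the two models' outputs and fail loudly above a tolerance. A standalone sketch of that check, with synthetic tensors standing in for real model outputs:

import torch

def outputs_match(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> bool:
    # report the worst absolute deviation, then apply an elementwise tolerance
    max_absolute_diff = torch.max(torch.abs(ours - theirs)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")
    return torch.allclose(ours, theirs, atol=atol)

ours = torch.randn(2, 5)
theirs = ours + 5e-4  # a constant perturbation, safely under atol
assert outputs_match(ours, theirs)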
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
SCREAMING_SNAKE_CASE_:Any = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
warnings.warn(
"""The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use VideoMAEImageProcessor instead.""", lowerCamelCase__, )
super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
| 662 |
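The snippet above is the usual deprecation-shim pattern: the old class subclasses its replacement, emits a `FutureWarning` on construction, and otherwise behaves identically. A generic sketch with hypothetical class names:

import warnings

class NewProcessor:
    def __init__(self, size: int = 224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    """Backward-compatible alias for NewProcessor."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    shim = OldFeatureExtractor(size=128)
assert shim.size == 128
assert any(issubclass(w.category, FutureWarning) for w in caught)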
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Any = tempfile.mkdtemp()
A : List[str] = BlipImageProcessor()
A : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
A : str = BlipProcessor(lowerCamelCase__, lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).tokenizer
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).image_processor
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0 )
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=False, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor, BlipImageProcessor )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : str = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : int = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_image_processor()
A : int = self.get_tokenizer()
A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = """lower newer"""
A : List[Any] = processor(text=lowerCamelCase__ )
A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : Union[str, Any] = self.prepare_image_inputs()
A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
        with pytest.raises(ValueError ):
processor()
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Optional[int] = processor.batch_decode(lowerCamelCase__ )
A : Dict = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : int = self.get_tokenizer()
A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : List[str] = self.prepare_image_inputs()
A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_:Tuple = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:int = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:List[str] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Tuple = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:List[str] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 662 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
        return f'''gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'''
def _lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 4, 64, 64), lowerCamelCase__=False ):
A : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return image
def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__="CompVis/stable-diffusion-v1-4" ):
A : str = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = """bf16""" if fpaa else None
        model , params = FlaxUNetaDConditionModel.from_pretrained(
lowerCamelCase__, subfolder="""unet""", dtype=lowerCamelCase__, revision=lowerCamelCase__ )
return model, params
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 77, 768), lowerCamelCase__=False ):
A : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
A : List[str] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
        model , params = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=lowerCamelCase__ )
A : str = self.get_latents(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : int = self.get_encoder_hidden_states(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : Optional[Any] = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : Dict = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
        model , params = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=lowerCamelCase__ )
A : int = self.get_latents(lowerCamelCase__, shape=(4, 4, 96, 96), fpaa=lowerCamelCase__ )
A : Union[str, Any] = self.get_encoder_hidden_states(lowerCamelCase__, shape=(4, 77, 1024), fpaa=lowerCamelCase__ )
A : Dict = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : List[Any] = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
| 662 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Union[str, Any] = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Tuple = "dinat"
__lowerCamelCase : Optional[Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self, lowerCamelCase__=4, lowerCamelCase__=3, lowerCamelCase__=64, lowerCamelCase__=[3, 4, 6, 5], lowerCamelCase__=[2, 4, 8, 16], lowerCamelCase__=7, lowerCamelCase__=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], lowerCamelCase__=3.0, lowerCamelCase__=True, lowerCamelCase__=0.0, lowerCamelCase__=0.0, lowerCamelCase__=0.1, lowerCamelCase__="gelu", lowerCamelCase__=0.02, lowerCamelCase__=1e-5, lowerCamelCase__=0.0, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : str = patch_size
A : Dict = num_channels
A : Tuple = embed_dim
A : Any = depths
A : Union[str, Any] = len(lowerCamelCase__ )
A : List[str] = num_heads
A : Any = kernel_size
A : Any = dilations
A : List[str] = mlp_ratio
A : Optional[int] = qkv_bias
A : Dict = hidden_dropout_prob
A : str = attention_probs_dropout_prob
A : int = drop_path_rate
A : Tuple = hidden_act
A : List[str] = layer_norm_eps
A : Dict = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A : Tuple = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) )
A : List[str] = layer_scale_init_value
A : List[str] = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )]
A , A : Tuple = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
| 662 |
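The `hidden_size` computed near the end of the constructor encodes the usual hierarchical-backbone rule: the channel width doubles at every stage, so the final width is `embed_dim * 2**(num_stages - 1)`. With the defaults shown above:

embed_dim, depths = 64, [3, 4, 6, 5]              # defaults from the config above
stage_widths = [embed_dim * 2**i for i in range(len(depths))]
print(stage_widths)                               # [64, 128, 256, 512]
assert embed_dim * 2 ** (len(depths) - 1) == stage_widths[-1] == 512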
from typing import Any
import numpy as np
def __UpperCamelCase ( _lowerCAmelCase ) -> bool:
"""simple docstring"""
return np.array_equal(_lowerCAmelCase , matrix.conjugate().T )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Any = v.conjugate().T
A : List[Any] = v_star.dot(_lowerCAmelCase )
assert isinstance(_lowerCAmelCase , np.ndarray )
return (v_star_dot.dot(_lowerCAmelCase )) / (v_star.dot(_lowerCAmelCase ))
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
A : Any = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
A : str = np.array([[1], [2], [3]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) )
A : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 662 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:List[str] = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
A : Union[str, Any] = UniSpeechSatForSequenceClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
A : Tuple = downstream_dict["""projector.weight"""]
A : Dict = downstream_dict["""projector.bias"""]
A : Tuple = downstream_dict["""model.post_net.linear.weight"""]
A : Dict = downstream_dict["""model.post_net.linear.bias"""]
return model
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
"""simple docstring"""
A : Tuple = UniSpeechSatForAudioFrameClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
A : List[Any] = downstream_dict["""model.linear.weight"""]
A : str = downstream_dict["""model.linear.bias"""]
return model
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
A : List[Any] = UniSpeechSatForXVector.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
A : Union[str, Any] = downstream_dict["""connector.weight"""]
A : Optional[int] = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
A : int = downstream_dict[
f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
A : Optional[int] = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
A : Dict = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
A : Dict = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
A : Optional[int] = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
A : int = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
A : Any = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
"""simple docstring"""
    checkpoint = torch.load(_lowerCAmelCase , map_location="""cpu""" )
    downstream_dict = checkpoint["""Downstream"""]
    hf_config = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        _lowerCAmelCase , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("""ForSequenceClassification""" ):
        hf_model = convert_classification(_lowerCAmelCase , hf_config , downstream_dict )
    elif arch.endswith("""ForAudioFrameClassification""" ):
        hf_model = convert_diarization(_lowerCAmelCase , hf_config , downstream_dict )
    elif arch.endswith("""ForXVector""" ):
        hf_model = convert_xvector(_lowerCAmelCase , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(_lowerCAmelCase )
hf_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
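
# Hedged usage sketch: the script above is driven entirely by its CLI flags. The
# script name, model name, and paths below are illustrative assumptions, not values
# taken from this file:
#   python convert_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model
# The dispatch on hf_config.architectures[0] selects the matching head-conversion helper.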
| 662 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Decompose a square matrix into lower- and upper-triangular factors (Doolittle's method)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
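
# Minimal numeric check for the decomposition above (matrix values chosen for illustration):
if __name__ == "__main__":
    demo = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower_demo, upper_demo = lower_upper_decomposition(demo)
    # L is unit lower-triangular, U is upper-triangular, and L @ U reconstructs the input.
    assert np.allclose(lower_demo @ upper_demo, demo)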
| 662 | 1 |
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """Sort 'arr' by repeatedly extracting monotone strands and merging them into 'solution'."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
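
# Usage sketch: strand sort peels off monotone "strands" and merges each into the running
# solution; worst case is O(n^2) comparisons, while nearly-sorted input is close to O(n).
if __name__ == "__main__":
    print(strand_sort([5, 9, 1, 4, 3]))                # [1, 3, 4, 5, 9]
    print(strand_sort([5, 9, 1, 4, 3], reverse=True))  # [9, 5, 4, 3, 1]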
| 662 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    """Compute the target (height, width), optionally preserving aspect ratio and snapping to a multiple."""

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
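
# Worked numbers for the helper above (shapes are illustrative): a 480x640 image targeted
# at 384x384 with keep_aspect_ratio=True and multiple=32 fits the height (scale 0.8, since
# |1 - 0.6| > |1 - 0.8|) and returns (384, 512), both multiples of 32.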
class DPTImageProcessor(BaseImageProcessor):
    """Image processor for DPT: optional aspect-preserving resize snapped to a multiple, rescale, and normalization."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, keep_aspect_ratio=False, ensure_multiple_of=1, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Dict = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
A : Optional[Any] = get_resize_output_image_size(
lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, )
return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A : str = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__ )
A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
A : Tuple = resample if resample is not None else self.resample
A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A : int = do_normalize if do_normalize is not None else self.do_normalize
A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A : Optional[int] = image_std if image_std is not None else self.image_std
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : str = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCamelCase__ ):
A : int = target_sizes.numpy()
A : Union[str, Any] = []
for idx in range(len(lowerCamelCase__ ) ):
A : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ )
A : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
A : List[str] = logits.argmax(dim=1 )
A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
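
# Hedged usage sketch for the processor above (the PIL image is an assumption; the
# argument values shown are this class's defaults):
#   processor = DPTImageProcessor(size={"height": 384, "width": 384}, keep_aspect_ratio=True, ensure_multiple_of=32)
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # (1, 3, H, W) with H and W multiples of 32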
| 662 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
SCREAMING_SNAKE_CASE_:Optional[int] = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/bigbird-roberta-base""": 4_096,
"""google/bigbird-roberta-large""": 4_096,
"""google/bigbird-base-trivia-itc""": 4_096,
}
SCREAMING_SNAKE_CASE_:Any = """▁"""
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BigBird tokenizer with a SentencePiece slow counterpart."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__="<unk>", lowerCamelCase__="<s>", lowerCamelCase__="</s>", lowerCamelCase__="<pad>", lowerCamelCase__="[SEP]", lowerCamelCase__="[MASK]", lowerCamelCase__="[CLS]", **lowerCamelCase__, ):
A : Dict = AddedToken(lowerCamelCase__, lstrip=lowerCamelCase__, rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__, lowerCamelCase__ ) else bos_token
A : List[Any] = AddedToken(lowerCamelCase__, lstrip=lowerCamelCase__, rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__, lowerCamelCase__ ) else eos_token
A : str = AddedToken(lowerCamelCase__, lstrip=lowerCamelCase__, rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__, lowerCamelCase__ ) else unk_token
A : List[Any] = AddedToken(lowerCamelCase__, lstrip=lowerCamelCase__, rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__, lowerCamelCase__ ) else pad_token
A : Union[str, Any] = AddedToken(lowerCamelCase__, lstrip=lowerCamelCase__, rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__, lowerCamelCase__ ) else cls_token
A : str = AddedToken(lowerCamelCase__, lstrip=lowerCamelCase__, rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__, lowerCamelCase__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
A : str = AddedToken(lowerCamelCase__, lstrip=lowerCamelCase__, rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__, lowerCamelCase__ ) else mask_token
super().__init__(
lowerCamelCase__, tokenizer_file=lowerCamelCase__, bos_token=lowerCamelCase__, eos_token=lowerCamelCase__, unk_token=lowerCamelCase__, sep_token=lowerCamelCase__, pad_token=lowerCamelCase__, cls_token=lowerCamelCase__, mask_token=lowerCamelCase__, **lowerCamelCase__, )
A : Dict = vocab_file
A : Any = False if not self.vocab_file else True
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Tuple = [self.sep_token_id]
A : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Any = [self.sep_token_id]
A : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A : Optional[Any] = os.path.join(
lowerCamelCase__, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file, lowerCamelCase__ )
return (out_vocab_file,)
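
# Sketch of the special-token layouts the methods above construct:
#   single sequence: [CLS] A [SEP]           token_type_ids: all 0
#   sequence pair:   [CLS] A [SEP] B [SEP]   token_type_ids: 0s over "[CLS] A [SEP]", 1s over "B [SEP]"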
| 662 |
class SubArray:
    """Maximum-sum contiguous subarray (Kadane-style dynamic programming) over comma-separated input."""

    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
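
# Worked trace of the recurrence above for the input "1,-3,4,-2,3":
#   value:      1   -3    4   -2    3
#   sum_value:  1   -2    4    2    5   (best sum ending at index i)
#   rear:       1    1    4    4    5   (best sum seen so far)
# The answer is 5, from the subarray 4, -2, 3, e.g.:
#   assert SubArray("1,-3,4,-2,3").solve_sub_array() == 5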
| 662 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature extractor class object from its string name."""
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    """Return the feature extractor config dict for a checkpoint, or {} if none can be found."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self ):
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(lowerCamelCase__ )
def _lowerCAmelCase ( cls, lowerCamelCase__, **lowerCamelCase__ ):
A : Tuple = kwargs.pop("""config""", lowerCamelCase__ )
A : int = kwargs.pop("""trust_remote_code""", lowerCamelCase__ )
A : Any = True
A , A : Any = FeatureExtractionMixin.get_feature_extractor_dict(lowerCamelCase__, **lowerCamelCase__ )
A : List[str] = config_dict.get("""feature_extractor_type""", lowerCamelCase__ )
A : Tuple = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""", {} ):
A : Any = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : Any = AutoConfig.from_pretrained(lowerCamelCase__, **lowerCamelCase__ )
# It could be in `config.feature_extractor_type``
A : List[Any] = getattr(lowerCamelCase__, """feature_extractor_type""", lowerCamelCase__ )
if hasattr(lowerCamelCase__, """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
A : Optional[Any] = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
A : int = feature_extractor_class_from_name(lowerCamelCase__ )
A : int = feature_extractor_auto_map is not None
A : Dict = feature_extractor_class is not None or type(lowerCamelCase__ ) in FEATURE_EXTRACTOR_MAPPING
A : Union[str, Any] = resolve_trust_remote_code(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
if has_remote_code and trust_remote_code:
A : Optional[Any] = get_class_from_dynamic_module(
lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ )
A : Any = kwargs.pop("""code_revision""", lowerCamelCase__ )
if os.path.isdir(lowerCamelCase__ ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(lowerCamelCase__, **lowerCamelCase__ )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(lowerCamelCase__, **lowerCamelCase__ )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(lowerCamelCase__ ) in FEATURE_EXTRACTOR_MAPPING:
A : List[Any] = FEATURE_EXTRACTOR_MAPPING[type(lowerCamelCase__ )]
return feature_extractor_class.from_dict(lowerCamelCase__, **lowerCamelCase__ )
raise ValueError(
f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def _lowerCAmelCase ( lowerCamelCase__, lowerCamelCase__ ):
FEATURE_EXTRACTOR_MAPPING.register(lowerCamelCase__, lowerCamelCase__ )
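
# Hedged usage sketch (the checkpoint name is an assumption): resolving
#   AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
# reads the checkpoint config, finds model_type "wav2vec2" in the mapping above, and
# instantiates Wav2Vec2FeatureExtractor via feature_extractor_class_from_name.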
| 662 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[Any] = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for BiT (Big Transfer) ResNet-style backbones."""

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
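
# Minimal instantiation sketch (values derived from the defaults defined above):
#   config = BitConfig(layer_type="preactivation", global_padding="same")
#   config.global_padding  # "SAME" - lowercase input is upper-cased by the validation branch
#   config.stage_names     # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']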
| 662 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Union[str, Any] = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class OpenAIGPTConfig(PretrainedConfig):
    """Configuration for the original OpenAI GPT model."""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=4_0478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
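
# Attribute-map sketch: the mapping above lets generic code read standardized names, e.g.
# with config = OpenAIGPTConfig(), config.hidden_size returns config.n_embd (768) and
# config.num_hidden_layers returns config.n_layer (12).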
| 662 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    """Builds BertGeneration configs and inputs, and checks model output shapes."""
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=50, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=None, ):
A : List[str] = parent
A : List[str] = batch_size
A : Optional[int] = seq_length
A : Optional[int] = is_training
A : Tuple = use_input_mask
A : Optional[Any] = vocab_size
A : str = hidden_size
A : Any = num_hidden_layers
A : List[Any] = num_attention_heads
A : Optional[int] = intermediate_size
A : int = hidden_act
A : Dict = hidden_dropout_prob
A : Optional[Any] = attention_probs_dropout_prob
A : List[Any] = max_position_embeddings
A : int = initializer_range
A : Tuple = use_labels
A : List[str] = scope
def _lowerCAmelCase ( self ):
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : int = None
if self.use_input_mask:
A : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : List[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCAmelCase ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, )
def _lowerCAmelCase ( self ):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : str = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )
A : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : List[str] = True
A : Union[str, Any] = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, )
A : Optional[Any] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : Union[str, Any] = True
A : Optional[int] = True
A : Optional[int] = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
# first forward pass
A : int = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__, )
A : List[str] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size )
A : int = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
A : List[str] = torch.cat([input_ids, next_tokens], dim=-1 )
A : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1 )
A : List[str] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
# select random slice
A : Any = ids_tensor((1,), output_from_past.shape[-1] ).item()
A : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
A : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, ):
A : Optional[int] = BertGenerationDecoder(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : List[str] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self ):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/generation/pipeline test-suite entry point for BertGeneration."""
__lowerCamelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__lowerCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self ):
A : Any = BertGenerationEncoderTester(self )
A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = """bert"""
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels )
def _lowerCAmelCase ( self ):
A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
A : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(lowerCamelCase__ )
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    """Slow integration check of encoder hidden states against the released checkpoint."""
@slow
def _lowerCAmelCase ( self ):
A : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : Optional[int] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Union[str, Any] = model(lowerCamelCase__ )[0]
A : List[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Tuple = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    """Slow integration check of decoder logits against the released checkpoint."""
@slow
def _lowerCAmelCase ( self ):
A : Optional[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Dict = model(lowerCamelCase__ )[0]
A : List[str] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Optional[Any] = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
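
# Sketch of the cache invariant the past-key-values test above asserts (tensor names
# are illustrative):
#   full = model(ids_all, ..., output_hidden_states=True)["hidden_states"][0]
#   incr = model(ids_new, past_key_values=past, ...)["hidden_states"][0]
#   torch.allclose(full[:, -3:, idx], incr[:, :, idx], atol=1e-3)
# i.e. decoding three new tokens through the cache matches a full no-cache forward pass.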
| 662 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value):
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sums every node value in a binary tree with a depth-first traversal."""

    def __init__(self, tree):
        self.tree = tree

    def depth_first_search(self, node):
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
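
# Usage sketch for the iterator above (tree values chosen for illustration):
if __name__ == "__main__":
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    print(list(BinaryTreeNodeSum(root)))  # [12]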
| 662 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    """Image processor with ConvNeXt-style eval resizing: crop_pct upscale plus center crop below 384, plain resize at 384 or above."""

    model_input_names = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384}
A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Dict = size
# Default value set here for backwards compatibility where the value in config is None
A : Dict = crop_pct if crop_pct is not None else 224 / 256
A : Optional[int] = resample
A : List[str] = do_rescale
A : Tuple = rescale_factor
A : Optional[int] = do_normalize
A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
A : List[str] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A : int = int(shortest_edge / crop_pct )
A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Dict = do_resize if do_resize is not None else self.do_resize
A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
A : str = resample if resample is not None else self.resample
A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Dict = do_normalize if do_normalize is not None else self.do_normalize
A : List[str] = image_mean if image_mean is not None else self.image_mean
A : Optional[Any] = image_std if image_std is not None else self.image_std
A : Optional[Any] = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
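
# Worked numbers for the crop_pct branch above: with size={"shortest_edge": 224} and the
# default crop_pct = 224 / 256, the image is first resized so its short side becomes
# int(224 / (224 / 256)) = 256, then center-cropped to 224x224. At shortest_edge >= 384
# the image is instead warped directly to (shortest_edge, shortest_edge) with no crop.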
| 662 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Bundles a Speech2Text feature extractor and tokenizer behind a single processor interface."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def __call__( self, *lowerCamelCase__, **lowerCamelCase__ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase__, **lowerCamelCase__ )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
A : Optional[Any] = kwargs.pop("""raw_speech""" )
else:
A : Optional[Any] = kwargs.pop("""audio""", lowerCamelCase__ )
A : Any = kwargs.pop("""sampling_rate""", lowerCamelCase__ )
A : Tuple = kwargs.pop("""text""", lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
A : List[str] = args[0]
A : Any = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
A : List[str] = self.feature_extractor(lowerCamelCase__, *lowerCamelCase__, sampling_rate=lowerCamelCase__, **lowerCamelCase__ )
if text is not None:
A : List[str] = self.tokenizer(lowerCamelCase__, **lowerCamelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A : int = encodings["""input_ids"""]
return inputs
def _lowerCAmelCase ( self, *lowerCamelCase__, **lowerCamelCase__ ):
return self.tokenizer.batch_decode(*lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, *lowerCamelCase__, **lowerCamelCase__ ):
return self.tokenizer.decode(*lowerCamelCase__, **lowerCamelCase__ )
@contextmanager
def _lowerCAmelCase ( self ):
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
A : Any = True
A : Any = self.tokenizer
yield
A : Any = self.feature_extractor
A : List[str] = False
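
# Hedged usage sketch (the waveform and text are assumptions): calling the processor with
#   inputs = processor(audio=waveform, sampling_rate=16000, text="a transcript")
# runs the feature extractor on the audio, the tokenizer on the text, and attaches the
# token ids to the audio features as the "labels" entry of the returned inputs.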
| 662 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build (old_name, new_name) pairs mapping DiT/BEiT checkpoint keys to HuggingFace names."""
    prefix = """backbone.""" if is_semantic else """"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split the fused qkv projection of each block into separate HuggingFace query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2: we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    """Move dct[old] to dct[new]."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Fetch the standard COCO image used to sanity-check converted vision models."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy the original DiT weights into the HuggingFace BEiT structure, then verify and save."""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr",
            commit_message="Add image processor", use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr",
            commit_message="Add model", use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
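
# Example invocation (the URL is this script's default; the script name and output path
# are illustrative assumptions):
#   python convert_dit_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base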
| 662 | 1 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Load TF weights into a freshly initialized LXMERT model and save it as a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
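
# Example invocation (the script name and paths are illustrative assumptions):
#   python convert_lxmert_checkpoint.py \
#       --tf_checkpoint_path ./lxmert_tf_ckpt \
#       --config_file ./lxmert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin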
| 662 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    """Deprecated alias kept for backward compatibility; use CLIPImageProcessor instead."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 662 | 1 |
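The class above is an instance of a common deprecation-shim pattern. A generic sketch with hypothetical class names: the old name survives as a subclass that warns on construction and defers everything to its replacement:

import warnings

class NewImageProcessor:  # stand-in for the replacement class
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):  # deprecated alias
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)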
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_:Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Tuple = """▁"""
SCREAMING_SNAKE_CASE_:Any = {"""vocab_file""": """spiece.model"""}
SCREAMING_SNAKE_CASE_:Union[str, Any] = {
"""vocab_file""": {
"""google/reformer-crime-and-punishment""": (
"""https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
)
}
}
SCREAMING_SNAKE_CASE_:Optional[int] = {
"""google/reformer-crime-and-punishment""": 524_288,
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Tuple = ["input_ids", "attention_mask"]
def __init__( self, lowerCamelCase__, lowerCamelCase__="</s>", lowerCamelCase__="<unk>", lowerCamelCase__=[], lowerCamelCase__ = None, **lowerCamelCase__, ):
A : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCamelCase__, unk_token=lowerCamelCase__, additional_special_tokens=lowerCamelCase__, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase__, )
A : int = vocab_file
A : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase__ )
@property
def _lowerCAmelCase ( self ):
return self.sp_model.get_piece_size()
def _lowerCAmelCase ( self ):
A : Union[str, Any] = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
A : List[Any] = self.__dict__.copy()
A : Any = None
return state
def __setstate__( self, lowerCamelCase__ ):
A : Dict = d
# for backward compatibility
if not hasattr(self, """sp_model_kwargs""" ):
A : List[str] = {}
A : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return self.sp_model.encode(lowerCamelCase__, out_type=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return self.sp_model.piece_to_id(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if index < self.sp_model.get_piece_size():
A : int = self.sp_model.IdToPiece(lowerCamelCase__ )
return token
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Any = []
A : str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCamelCase__ ) + token
A : int = []
else:
current_sub_tokens.append(lowerCamelCase__ )
out_string += self.sp_model.decode(lowerCamelCase__ )
return out_string.strip()
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A : Optional[Any] = os.path.join(
lowerCamelCase__, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__, """wb""" ) as fi:
A : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
| 662 |
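A pure-Python sketch of the detokenization loop in the tokenizer above: pieces between special tokens are batched into a single decode call so the special tokens pass through verbatim. Here decode_pieces is a stand-in for sp_model.decode:

def decode_pieces(pieces):
    # SentencePiece joins pieces and turns the "▁" marker back into spaces.
    return "".join(pieces).replace("▁", " ")

def convert_tokens_to_string(tokens, special_tokens):
    out_string = ""
    current_sub_tokens = []
    for token in tokens:
        if token in special_tokens:
            out_string += decode_pieces(current_sub_tokens) + token
            current_sub_tokens = []
        else:
            current_sub_tokens.append(token)
    out_string += decode_pieces(current_sub_tokens)
    return out_string.strip()

print(convert_tokens_to_string(["▁Hello", "▁world", "</s>"], {"</s>"}))  # Hello world</s>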
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(
lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, )
A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths}
A : str = Text(
cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, )
def _lowerCAmelCase ( self ):
# Build iterable dataset
if self.streaming:
A : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A : List[str] = None
A : Dict = None
A : Tuple = None
A : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, )
A : List[str] = self.builder.as_dataset(
split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory )
return dataset
| 662 | 1 |
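The reader above is internal plumbing; in practice a text dataset is built through the public API, roughly as follows (corpus.txt is a placeholder file name):

from datasets import load_dataset

# Each line of corpus.txt becomes one row with a single "text" column.
ds = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")
print(ds[0]["text"])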
from typing import TYPE_CHECKING
from ....utils import _LazyModule
SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 662 | 1 |
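A stdlib-only sketch of the lazy-import idea behind _LazyModule above: attribute access triggers the real submodule import on first use. This is a hypothetical simplification, not the actual transformers implementation:

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # Only reached when normal attribute lookup fails, i.e. on first use.
        if attr not in self._class_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)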
from __future__ import annotations
import numpy as np
def __UpperCamelCase ( _lowerCAmelCase ) -> tuple[np.ndarray, np.ndarray]:
"""simple docstring"""
A , A : int = np.shape(_lowerCAmelCase )
if rows != columns:
A : Union[str, Any] = (
"""'table' has to be of square shaped array but got a """
f'''{rows}x{columns} array:\n{table}'''
)
raise ValueError(_lowerCAmelCase )
A : Union[str, Any] = np.zeros((rows, columns) )
A : Dict = np.zeros((rows, columns) )
for i in range(_lowerCAmelCase ):
for j in range(_lowerCAmelCase ):
A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) )
if upper[j][j] == 0:
raise ArithmeticError("""No LU decomposition exists""" )
A : Any = (table[i][j] - total) / upper[j][j]
A : Union[str, Any] = 1
for j in range(_lowerCAmelCase , _lowerCAmelCase ):
A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) )
A : str = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 |
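Because the identifiers in the routine above are masked, here is a de-masked sketch of the same Doolittle decomposition (unit diagonal on L, no pivoting) together with a numerical check on a small matrix:

import numpy as np

def lu_decomposition(table):
    rows, columns = np.shape(table)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper

table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lu_decomposition(table)
assert np.allclose(lower @ upper, table)  # L @ U reproduces the input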
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int:
"""simple docstring"""
A , A : str = 1, 1
A : List[Any] = []
for i in range(1 , n + 1 ):
A : Optional[int] = prev_numerator + 2 * prev_denominator
A : Any = prev_numerator + prev_denominator
if len(str(_lowerCAmelCase ) ) > len(str(_lowerCAmelCase ) ):
result.append(_lowerCAmelCase )
A : int = numerator
A : int = denominator
return len(_lowerCAmelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 662 | 1 |
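The recurrence above generates the convergents of the continued fraction for sqrt(2) (Project Euler 57): numerator' = numerator + 2 * denominator, denominator' = numerator + denominator. A quick sanity check of the first few expansions:

num, den = 1, 1
for _ in range(4):
    num, den = num + 2 * den, num + den
    print(f"{num}/{den}")
# prints 3/2, 7/5, 17/12, 41/29, each a closer approximation of sqrt(2)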